Skip to content

Commit 0c2d822

Browse files
committed
Merge branch 'netem-use-a-seeded-prng-for-loss-and-corruption-events'
François Michel says:

====================
netem: use a seeded PRNG for loss and corruption events

In order to reproduce bugs or to perform reproducible performance evaluation of network protocols and applications, it is useful to have reproducible test suites and tools. This patch adds a way to specify a PRNG seed through the TCA_NETEM_PRNG_SEED attribute for generating netem loss and corruption events. Initializing the qdisc with the same seed leads to the exact same loss and corruption patterns. If no seed is explicitly specified, the qdisc generates a random seed using get_random_u64().

This patch can be and has been tested using tc from the following iproute2-next fork: https://github.com/francoismichel/iproute2-next

For instance, setting the seed 42424242 on the loopback with a loss rate of 10% will systematically drop the 5th, 12th and 24th packet when sending 25 packets.
====================

Link: https://lore.kernel.org/r/20230815092348.1449179-1-francois.michel@uclouvain.be
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents a5e5b2c + 3cad70b commit 0c2d822

2 files changed

Lines changed: 35 additions & 15 deletions

File tree

include/uapi/linux/pkt_sched.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -603,6 +603,7 @@ enum {
603603
TCA_NETEM_JITTER64,
604604
TCA_NETEM_SLOT,
605605
TCA_NETEM_SLOT_DIST,
606+
TCA_NETEM_PRNG_SEED,
606607
__TCA_NETEM_MAX,
607608
};
608609

net/sched/sch_netem.c

Lines changed: 34 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,11 @@ struct netem_sched_data {
105105
u32 rho;
106106
} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
107107

108+
struct prng {
109+
u64 seed;
110+
struct rnd_state prng_state;
111+
} prng;
112+
108113
struct disttable *delay_dist;
109114

110115
enum {
@@ -179,15 +184,16 @@ static void init_crandom(struct crndstate *state, unsigned long rho)
179184
* Next number depends on last value.
180185
* rho is scaled to avoid floating point.
181186
*/
182-
static u32 get_crandom(struct crndstate *state)
187+
static u32 get_crandom(struct crndstate *state, struct prng *p)
183188
{
184189
u64 value, rho;
185190
unsigned long answer;
191+
struct rnd_state *s = &p->prng_state;
186192

187193
if (!state || state->rho == 0) /* no correlation */
188-
return get_random_u32();
194+
return prandom_u32_state(s);
189195

190-
value = get_random_u32();
196+
value = prandom_u32_state(s);
191197
rho = (u64)state->rho + 1;
192198
answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
193199
state->last = answer;
@@ -201,7 +207,7 @@ static u32 get_crandom(struct crndstate *state)
201207
static bool loss_4state(struct netem_sched_data *q)
202208
{
203209
struct clgstate *clg = &q->clg;
204-
u32 rnd = get_random_u32();
210+
u32 rnd = prandom_u32_state(&q->prng.prng_state);
205211

206212
/*
207213
* Makes a comparison between rnd and the transition
@@ -266,18 +272,19 @@ static bool loss_4state(struct netem_sched_data *q)
266272
static bool loss_gilb_ell(struct netem_sched_data *q)
267273
{
268274
struct clgstate *clg = &q->clg;
275+
struct rnd_state *s = &q->prng.prng_state;
269276

270277
switch (clg->state) {
271278
case GOOD_STATE:
272-
if (get_random_u32() < clg->a1)
279+
if (prandom_u32_state(s) < clg->a1)
273280
clg->state = BAD_STATE;
274-
if (get_random_u32() < clg->a4)
281+
if (prandom_u32_state(s) < clg->a4)
275282
return true;
276283
break;
277284
case BAD_STATE:
278-
if (get_random_u32() < clg->a2)
285+
if (prandom_u32_state(s) < clg->a2)
279286
clg->state = GOOD_STATE;
280-
if (get_random_u32() > clg->a3)
287+
if (prandom_u32_state(s) > clg->a3)
281288
return true;
282289
}
283290

@@ -289,7 +296,7 @@ static bool loss_event(struct netem_sched_data *q)
289296
switch (q->loss_model) {
290297
case CLG_RANDOM:
291298
/* Random packet drop 0 => none, ~0 => all */
292-
return q->loss && q->loss >= get_crandom(&q->loss_cor);
299+
return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
293300

294301
case CLG_4_STATES:
295302
/* 4state loss model algorithm (used also for GI model)
@@ -318,6 +325,7 @@ static bool loss_event(struct netem_sched_data *q)
318325
*/
319326
static s64 tabledist(s64 mu, s32 sigma,
320327
struct crndstate *state,
328+
struct prng *prng,
321329
const struct disttable *dist)
322330
{
323331
s64 x;
@@ -327,7 +335,7 @@ static s64 tabledist(s64 mu, s32 sigma,
327335
if (sigma == 0)
328336
return mu;
329337

330-
rnd = get_crandom(state);
338+
rnd = get_crandom(state, prng);
331339

332340
/* default uniform distribution */
333341
if (dist == NULL)
@@ -449,7 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
449457
skb->prev = NULL;
450458

451459
/* Random duplication */
452-
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
460+
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
453461
++count;
454462

455463
/* Drop packet? */
@@ -492,7 +500,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
492500
* If packet is going to be hardware checksummed, then
493501
* do it now in software before we mangle it.
494502
*/
495-
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
503+
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
496504
if (skb_is_gso(skb)) {
497505
skb = netem_segment(skb, sch, to_free);
498506
if (!skb)
@@ -530,12 +538,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
530538
cb = netem_skb_cb(skb);
531539
if (q->gap == 0 || /* not doing reordering */
532540
q->counter < q->gap - 1 || /* inside last reordering gap */
533-
q->reorder < get_crandom(&q->reorder_cor)) {
541+
q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
534542
u64 now;
535543
s64 delay;
536544

537545
delay = tabledist(q->latency, q->jitter,
538-
&q->delay_cor, q->delay_dist);
546+
&q->delay_cor, &q->prng, q->delay_dist);
539547

540548
now = ktime_get_ns();
541549

@@ -639,7 +647,7 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
639647
else
640648
next_delay = tabledist(q->slot_config.dist_delay,
641649
(s32)(q->slot_config.dist_jitter),
642-
NULL, q->slot_dist);
650+
NULL, &q->prng, q->slot_dist);
643651

644652
q->slot.slot_next = now + next_delay;
645653
q->slot.packets_left = q->slot_config.max_packets;
@@ -922,6 +930,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
922930
[TCA_NETEM_LATENCY64] = { .type = NLA_S64 },
923931
[TCA_NETEM_JITTER64] = { .type = NLA_S64 },
924932
[TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) },
933+
[TCA_NETEM_PRNG_SEED] = { .type = NLA_U64 },
925934
};
926935

927936
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -1040,6 +1049,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
10401049
/* capping jitter to the range acceptable by tabledist() */
10411050
q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
10421051

1052+
if (tb[TCA_NETEM_PRNG_SEED])
1053+
q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
1054+
else
1055+
q->prng.seed = get_random_u64();
1056+
prandom_seed_state(&q->prng.prng_state, q->prng.seed);
1057+
10431058
unlock:
10441059
sch_tree_unlock(sch);
10451060

@@ -1203,6 +1218,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
12031218
goto nla_put_failure;
12041219
}
12051220

1221+
if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
1222+
TCA_NETEM_PAD))
1223+
goto nla_put_failure;
1224+
12061225
return nla_nest_end(skb, nla);
12071226

12081227
nla_put_failure:

0 commit comments

Comments (0)