1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * net/sched/sch_netem.c Network emulator
4 *
5 * Many of the algorithms and ideas for this came from
6 * NIST Net which is not copyrighted.
7 *
8 * Authors: Stephen Hemminger <shemminger@osdl.org>
9 * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
10 */
11
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/skbuff.h>
19#include <linux/vmalloc.h>
20#include <linux/prandom.h>
21#include <linux/rtnetlink.h>
22#include <linux/reciprocal_div.h>
23#include <linux/rbtree.h>
24
25#include <net/gso.h>
26#include <net/netlink.h>
27#include <net/pkt_sched.h>
28#include <net/inet_ecn.h>
29
30#define VERSION "1.3"
31
32/* Network Emulation Queuing algorithm.
33 ====================================
34
35 Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
	 Network Emulation Tool"
37 [2] Luigi Rizzo, DummyNet for FreeBSD
38
39 ----------------------------------------------------------------
40
41 This started out as a simple way to delay outgoing packets to
42 test TCP but has grown to include most of the functionality
43 of a full blown network emulator like NISTnet. It can delay
44 packets and add random jitter (and correlation). The random
45 distribution can be loaded from a table as well to provide
46 normal, Pareto, or experimental curves. Packet loss,
47 duplication, and reordering can also be emulated.
48
	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.
53
54 Correlated Loss Generator models
55
	 Added generation of correlated loss according to a 4-state
	 Markov model (GI) and the 2-state "Gilbert-Elliot" model.
58
59 References:
60 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
61 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
62 and intuitive loss model for packet networks and its implementation
63 in the Netem module in the Linux kernel", available in [1]
64
	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
66 Fabio Ludovici <fabio.ludovici at yahoo.it>
67*/
68
69struct disttable {
70 u32 size;
71 s16 table[] __counted_by(size);
72};
73
74struct netem_sched_data {
75 /* internal t(ime)fifo qdisc uses t_root and sch->limit */
76 struct rb_root t_root;
77
78 /* a linear queue; reduces rbtree rebalancing when jitter is low */
79 struct sk_buff *t_head;
80 struct sk_buff *t_tail;
81
82 u32 t_len;
83
84 /* optional qdisc for classful handling (NULL at netem init) */
85 struct Qdisc *qdisc;
86
87 struct qdisc_watchdog watchdog;
88
89 s64 latency;
90 s64 jitter;
91
92 u32 loss;
93 u32 ecn;
94 u32 limit;
95 u32 counter;
96 u32 gap;
97 u32 duplicate;
98 u32 reorder;
99 u32 corrupt;
100 u64 rate;
101 s32 packet_overhead;
102 u32 cell_size;
103 struct reciprocal_value cell_size_reciprocal;
104 s32 cell_overhead;
105
106 struct crndstate {
107 u32 last;
108 u32 rho;
109 } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
110
111 struct prng {
112 u64 seed;
113 struct rnd_state prng_state;
114 } prng;
115
116 struct disttable *delay_dist;
117
118 enum {
119 CLG_RANDOM,
120 CLG_4_STATES,
121 CLG_GILB_ELL,
122 } loss_model;
123
124 enum {
125 TX_IN_GAP_PERIOD = 1,
126 TX_IN_BURST_PERIOD,
127 LOST_IN_GAP_PERIOD,
128 LOST_IN_BURST_PERIOD,
129 } _4_state_model;
130
131 enum {
132 GOOD_STATE = 1,
133 BAD_STATE,
134 } GE_state_model;
135
136 /* Correlated Loss Generation models */
137 struct clgstate {
138 /* state of the Markov chain */
139 u8 state;
140
141 /* 4-states and Gilbert-Elliot models */
142 u32 a1; /* p13 for 4-states or p for GE */
143 u32 a2; /* p31 for 4-states or r for GE */
144 u32 a3; /* p32 for 4-states or h for GE */
145 u32 a4; /* p14 for 4-states or 1-k for GE */
146 u32 a5; /* p23 used only in 4-states */
147 } clg;
148
149 struct tc_netem_slot slot_config;
150 struct slotstate {
151 u64 slot_next;
152 s32 packets_left;
153 s32 bytes_left;
154 } slot;
155
156 struct disttable *slot_dist;
157};
158
159/* Time stamp put into socket buffer control block
160 * Only valid when skbs are in our internal t(ime)fifo queue.
161 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
163 * and skb->next & skb->prev are scratch space for a qdisc,
164 * we save skb->tstamp value in skb->cb[] before destroying it.
165 */
166struct netem_skb_cb {
167 u64 time_to_send;
168};
169
170static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
171{
172 /* we assume we can use skb next/prev/tstamp as storage for rb_node */
173 qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
174 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
175}
176
177/* init_crandom - initialize correlated random number generator
178 * Use entropy source for initial seed.
179 */
180static void init_crandom(struct crndstate *state, unsigned long rho)
181{
182 state->rho = rho;
183 state->last = get_random_u32();
184}
185
186/* get_crandom - correlated random number generator
187 * Next number depends on last value.
188 * rho is scaled to avoid floating point.
189 */
190static u32 get_crandom(struct crndstate *state, struct prng *p)
191{
192 u64 value, rho;
193 unsigned long answer;
194 struct rnd_state *s = &p->prng_state;
195
196 if (!state || state->rho == 0) /* no correlation */
197 return prandom_u32_state(s);
198
199 value = prandom_u32_state(s);
200 rho = (u64)state->rho + 1;
201 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
202 state->last = answer;
203 return answer;
204}
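
/* Example (illustrative sketch, not part of the kernel build): the generator
 * above is a fixed-point exponentially weighted blend of the previous output
 * and a fresh pseudo-random value, with rho/2^32 acting as the correlation
 * coefficient.  The same arithmetic as a stand-alone user-space model,
 * assuming any uniform 32-bit source supplies "fresh":
 *
 *	#include <stdint.h>
 *
 *	static uint32_t crandom_model(uint32_t *last, uint32_t rho, uint32_t fresh)
 *	{
 *		uint64_t r = (uint64_t)rho + 1;
 *		uint64_t mix = fresh * ((1ull << 32) - r) + (uint64_t)*last * r;
 *
 *		*last = (uint32_t)(mix >> 32);
 *		return *last;
 *	}
 *
 * With rho = 0xC0000000 (~75% correlation) each output is roughly
 * 0.25 * fresh + 0.75 * previous, so successive values drift slowly
 * instead of jumping around uniformly.
 */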
205
206/* loss_4state - 4-state model loss generator
207 * Generates losses according to the 4-state Markov chain adopted in
208 * the GI (General and Intuitive) loss model.
209 */
210static bool loss_4state(struct netem_sched_data *q)
211{
212 struct clgstate *clg = &q->clg;
213 u32 rnd = prandom_u32_state(&q->prng.prng_state);
214
215 /*
216 * Makes a comparison between rnd and the transition
217 * probabilities outgoing from the current state, then decides the
218 * next state and if the next packet has to be transmitted or lost.
219 * The four states correspond to:
220 * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
221 * LOST_IN_GAP_PERIOD => isolated losses within a gap period
222 * LOST_IN_BURST_PERIOD => lost packets within a burst period
223 * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
224 */
225 switch (clg->state) {
226 case TX_IN_GAP_PERIOD:
227 if (rnd < clg->a4) {
228 clg->state = LOST_IN_GAP_PERIOD;
229 return true;
230 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
231 clg->state = LOST_IN_BURST_PERIOD;
232 return true;
233 } else if (clg->a1 + clg->a4 < rnd) {
234 clg->state = TX_IN_GAP_PERIOD;
235 }
236
237 break;
238 case TX_IN_BURST_PERIOD:
239 if (rnd < clg->a5) {
240 clg->state = LOST_IN_BURST_PERIOD;
241 return true;
242 } else {
243 clg->state = TX_IN_BURST_PERIOD;
244 }
245
246 break;
247 case LOST_IN_BURST_PERIOD:
248 if (rnd < clg->a3)
249 clg->state = TX_IN_BURST_PERIOD;
250 else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
251 clg->state = TX_IN_GAP_PERIOD;
252 } else if (clg->a2 + clg->a3 < rnd) {
253 clg->state = LOST_IN_BURST_PERIOD;
254 return true;
255 }
256 break;
257 case LOST_IN_GAP_PERIOD:
258 clg->state = TX_IN_GAP_PERIOD;
259 break;
260 }
261
262 return false;
263}
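
/* Example: the transition parameters map onto the chain above as follows
 * (the omitted probabilities are the complements that keep each row summing
 * to one):
 *
 *	TX_IN_GAP_PERIOD     --a4--> LOST_IN_GAP_PERIOD   (isolated loss,
 *	                                                    returns to gap tx)
 *	TX_IN_GAP_PERIOD     --a1--> LOST_IN_BURST_PERIOD  (burst begins)
 *	TX_IN_BURST_PERIOD   --a5--> LOST_IN_BURST_PERIOD
 *	LOST_IN_BURST_PERIOD --a3--> TX_IN_BURST_PERIOD
 *	LOST_IN_BURST_PERIOD --a2--> TX_IN_GAP_PERIOD      (burst ends)
 */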
264
265/* loss_gilb_ell - Gilbert-Elliot model loss generator
266 * Generates losses according to the Gilbert-Elliot loss model or
267 * its special cases (Gilbert or Simple Gilbert)
268 *
269 * Makes a comparison between random number and the transition
270 * probabilities outgoing from the current state, then decides the
271 * next state. A second random number is extracted and the comparison
272 * with the loss probability of the current state decides if the next
273 * packet will be transmitted or lost.
274 */
275static bool loss_gilb_ell(struct netem_sched_data *q)
276{
277 struct clgstate *clg = &q->clg;
278 struct rnd_state *s = &q->prng.prng_state;
279
280 switch (clg->state) {
281 case GOOD_STATE:
282 if (prandom_u32_state(s) < clg->a1)
283 clg->state = BAD_STATE;
284 if (prandom_u32_state(s) < clg->a4)
285 return true;
286 break;
287 case BAD_STATE:
288 if (prandom_u32_state(s) < clg->a2)
289 clg->state = GOOD_STATE;
290 if (prandom_u32_state(s) > clg->a3)
291 return true;
292 }
293
294 return false;
295}
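
/* Example (back-of-the-envelope check, not part of the kernel build): with
 * the parameter mapping used above (a1 = p, a2 = r, a3 = h, a4 = 1-k, all
 * scaled to the 0..2^32-1 range), the chain spends roughly p/(p+r) of the
 * time in BAD_STATE, so the long-run loss probability is approximately
 *
 *	P(loss) ~= (1 - k) * r/(p + r) + (1 - h) * p/(p + r)
 *
 * which for the "Simple Gilbert" special case (k = 1, h = 0) reduces to
 * p/(p + r).  A floating point sketch of that expectation:
 *
 *	static double ge_expected_loss(double p, double r, double h, double k)
 *	{
 *		double pi_bad = p / (p + r);	/* steady-state P(BAD_STATE) */
 *
 *		return (1.0 - k) * (1.0 - pi_bad) + (1.0 - h) * pi_bad;
 *	}
 */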
296
297static bool loss_event(struct netem_sched_data *q)
298{
299 switch (q->loss_model) {
300 case CLG_RANDOM:
301 /* Random packet drop 0 => none, ~0 => all */
302 return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
303
304 case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model).
		 * Extracts a value from the 4-state Markov loss generator
		 * and drops the packet when a loss event is indicated.
		 */
310 return loss_4state(q);
311
312 case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm.
		 * Extracts a value from the Gilbert-Elliot loss generator
		 * and drops the packet when a loss event is indicated.
		 */
318 return loss_gilb_ell(q);
319 }
320
321 return false; /* not reached */
322}
323
324
325/* tabledist - return a pseudo-randomly distributed value with mean mu and
326 * std deviation sigma. Uses table lookup to approximate the desired
327 * distribution, and a uniformly-distributed pseudo-random source.
328 */
329static s64 tabledist(s64 mu, s32 sigma,
330 struct crndstate *state,
331 struct prng *prng,
332 const struct disttable *dist)
333{
334 s64 x;
335 long t;
336 u32 rnd;
337
338 if (sigma == 0)
339 return mu;
340
341 rnd = get_crandom(state, prng);
342
343 /* default uniform distribution */
344 if (dist == NULL)
345 return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
346
347 t = dist->table[rnd % dist->size];
348 x = (sigma % NETEM_DIST_SCALE) * t;
349 if (x >= 0)
350 x += NETEM_DIST_SCALE/2;
351 else
352 x -= NETEM_DIST_SCALE/2;
353
354 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
355}
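
/* Worked example (assuming, as with the tables shipped by iproute2, that the
 * table is scaled so NETEM_DIST_SCALE corresponds to one standard deviation):
 * with mu = 100 ms (100,000,000 ns), sigma = 10 ms (10,000,000 ns) and a
 * table entry t = 8192 (+1 sigma), the split computation above gives
 *
 *	sigma / NETEM_DIST_SCALE = 1220,  sigma % NETEM_DIST_SCALE = 5760
 *	x = 5760 * 8192 + 8192/2 = 47,190,016
 *	result = 47,190,016 / 8192 + 1220 * 8192 + 100,000,000 = 110,000,000 ns
 *
 * i.e. roughly mu + t * sigma / NETEM_DIST_SCALE.  Splitting sigma into
 * quotient and remainder keeps the intermediate products small even for
 * nanosecond-scale sigma values.
 */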
356
357static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
358{
359 len += q->packet_overhead;
360
361 if (q->cell_size) {
362 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
363
364 if (len > cells * q->cell_size) /* extra cell needed for remainder */
365 cells++;
366 len = cells * (q->cell_size + q->cell_overhead);
367 }
368
369 return div64_u64(len * NSEC_PER_SEC, q->rate);
370}
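
/* Worked example (hypothetical numbers): with rate = 125,000 bytes/s
 * (1 Mbit/s), packet_overhead = 0, cell_size = 48 and cell_overhead = 5
 * (roughly ATM-style framing), a 1500 byte packet becomes
 *
 *	cells = 1500 / 48, rounded up = 32
 *	len   = 32 * (48 + 5) = 1696 bytes
 *	time  = 1696 * NSEC_PER_SEC / 125,000 = 13,568,000 ns (~13.6 ms)
 *
 * whereas without cell accounting the same packet would take 12 ms on the
 * emulated link.
 */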
371
372static void tfifo_reset(struct Qdisc *sch)
373{
374 struct netem_sched_data *q = qdisc_priv(sch);
375 struct rb_node *p = rb_first(&q->t_root);
376
377 while (p) {
378 struct sk_buff *skb = rb_to_skb(p);
379
380 p = rb_next(p);
381 rb_erase(&skb->rbnode, &q->t_root);
382 rtnl_kfree_skbs(skb, skb);
383 }
384
385 rtnl_kfree_skbs(q->t_head, q->t_tail);
386 q->t_head = NULL;
387 q->t_tail = NULL;
388 q->t_len = 0;
389}
390
391static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
392{
393 struct netem_sched_data *q = qdisc_priv(sch);
394 u64 tnext = netem_skb_cb(nskb)->time_to_send;
395
396 if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
397 if (q->t_tail)
398 q->t_tail->next = nskb;
399 else
400 q->t_head = nskb;
401 q->t_tail = nskb;
402 } else {
403 struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
404
405 while (*p) {
406 struct sk_buff *skb;
407
408 parent = *p;
409 skb = rb_to_skb(parent);
410 if (tnext >= netem_skb_cb(skb)->time_to_send)
411 p = &parent->rb_right;
412 else
413 p = &parent->rb_left;
414 }
415 rb_link_node(&nskb->rbnode, parent, p);
416 rb_insert_color(&nskb->rbnode, &q->t_root);
417 }
418 q->t_len++;
419 sch->q.qlen++;
420}
421
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted and re-enqueueing the remaining frames
425 */
426static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
427 struct sk_buff **to_free)
428{
429 struct sk_buff *segs;
430 netdev_features_t features = netif_skb_features(skb);
431
432 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
433
434 if (IS_ERR_OR_NULL(segs)) {
435 qdisc_drop(skb, sch, to_free);
436 return NULL;
437 }
438 consume_skb(skb);
439 return segs;
440}
441
442/*
443 * Insert one skb into qdisc.
444 * Note: parent depends on return value to account for queue length.
445 * NET_XMIT_DROP: queue length didn't change.
446 * NET_XMIT_SUCCESS: one skb was queued.
447 */
448static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
449 struct sk_buff **to_free)
450{
451 struct netem_sched_data *q = qdisc_priv(sch);
452 /* We don't fill cb now as skb_unshare() may invalidate it */
453 struct netem_skb_cb *cb;
454 struct sk_buff *skb2 = NULL;
455 struct sk_buff *segs = NULL;
456 unsigned int prev_len = qdisc_pkt_len(skb);
457 int count = 1;
458
459 /* Do not fool qdisc_drop_all() */
460 skb->prev = NULL;
461
462 /* Random duplication */
463 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
464 ++count;
465
466 /* Drop packet? */
467 if (loss_event(q)) {
468 if (q->ecn && INET_ECN_set_ce(skb))
469 qdisc_qstats_drop(sch); /* mark packet */
470 else
471 --count;
472 }
473 if (count == 0) {
474 qdisc_qstats_drop(sch);
475 __qdisc_drop(skb, to_free);
476 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
477 }
478
479 /* If a delay is expected, orphan the skb. (orphaning usually takes
480 * place at TX completion time, so _before_ the link transit delay)
481 */
482 if (q->latency || q->jitter || q->rate)
483 skb_orphan_partial(skb);
484
485 /*
486 * If we need to duplicate packet, then clone it before
487 * original is modified.
488 */
489 if (count > 1)
490 skb2 = skb_clone(skb, GFP_ATOMIC);
491
492 /*
493 * Randomized packet corruption.
494 * Make copy if needed since we are modifying
495 * If packet is going to be hardware checksummed, then
496 * do it now in software before we mangle it.
497 */
498 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
499 if (skb_is_gso(skb)) {
500 skb = netem_segment(skb, sch, to_free);
501 if (!skb)
502 goto finish_segs;
503
504 segs = skb->next;
505 skb_mark_not_on_list(skb);
506 qdisc_skb_cb(skb)->pkt_len = skb->len;
507 }
508
509 skb = skb_unshare(skb, GFP_ATOMIC);
510 if (unlikely(!skb)) {
511 qdisc_qstats_drop(sch);
512 goto finish_segs;
513 }
514 if (skb->ip_summed == CHECKSUM_PARTIAL &&
515 skb_checksum_help(skb)) {
516 qdisc_drop(skb, sch, to_free);
517 skb = NULL;
518 goto finish_segs;
519 }
520
521 skb->data[get_random_u32_below(skb_headlen(skb))] ^=
522 1<<get_random_u32_below(8);
523 }
524
525 if (unlikely(q->t_len >= sch->limit)) {
526 /* re-link segs, so that qdisc_drop_all() frees them all */
527 skb->next = segs;
528 qdisc_drop_all(skb, sch, to_free);
529 if (skb2)
530 __qdisc_drop(skb2, to_free);
531 return NET_XMIT_DROP;
532 }
533
534 /*
535 * If doing duplication then re-insert at top of the
536 * qdisc tree, since parent queuer expects that only one
537 * skb will be queued.
538 */
539 if (skb2) {
540 struct Qdisc *rootq = qdisc_root_bh(sch);
541 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
542
543 q->duplicate = 0;
544 rootq->enqueue(skb2, rootq, to_free);
545 q->duplicate = dupsave;
546 skb2 = NULL;
547 }
548
549 qdisc_qstats_backlog_inc(sch, skb);
550
551 cb = netem_skb_cb(skb);
552 if (q->gap == 0 || /* not doing reordering */
553 q->counter < q->gap - 1 || /* inside last reordering gap */
554 q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
555 u64 now;
556 s64 delay;
557
558 delay = tabledist(q->latency, q->jitter,
559 &q->delay_cor, &q->prng, q->delay_dist);
560
561 now = ktime_get_ns();
562
563 if (q->rate) {
564 struct netem_skb_cb *last = NULL;
565
566 if (sch->q.tail)
567 last = netem_skb_cb(sch->q.tail);
568 if (q->t_root.rb_node) {
569 struct sk_buff *t_skb;
570 struct netem_skb_cb *t_last;
571
572 t_skb = skb_rb_last(&q->t_root);
573 t_last = netem_skb_cb(t_skb);
574 if (!last ||
575 t_last->time_to_send > last->time_to_send)
576 last = t_last;
577 }
578 if (q->t_tail) {
579 struct netem_skb_cb *t_last =
580 netem_skb_cb(q->t_tail);
581
582 if (!last ||
583 t_last->time_to_send > last->time_to_send)
584 last = t_last;
585 }
586
587 if (last) {
588 /*
589 * Last packet in queue is reference point (now),
590 * calculate this time bonus and subtract
591 * from delay.
592 */
593 delay -= last->time_to_send - now;
594 delay = max_t(s64, 0, delay);
595 now = last->time_to_send;
596 }
597
598 delay += packet_time_ns(qdisc_pkt_len(skb), q);
599 }
600
601 cb->time_to_send = now + delay;
602 ++q->counter;
603 tfifo_enqueue(skb, sch);
604 } else {
605 /*
606 * Do re-ordering by putting one out of N packets at the front
607 * of the queue.
608 */
609 cb->time_to_send = ktime_get_ns();
610 q->counter = 0;
611
612 __qdisc_enqueue_head(skb, &sch->q);
613 sch->qstats.requeues++;
614 }
615
616finish_segs:
617 if (skb2)
618 __qdisc_drop(skb2, to_free);
619
620 if (segs) {
621 unsigned int len, last_len;
622 int rc, nb;
623
624 len = skb ? skb->len : 0;
625 nb = skb ? 1 : 0;
626
627 while (segs) {
628 skb2 = segs->next;
629 skb_mark_not_on_list(segs);
630 qdisc_skb_cb(segs)->pkt_len = segs->len;
631 last_len = segs->len;
632 rc = qdisc_enqueue(segs, sch, to_free);
633 if (rc != NET_XMIT_SUCCESS) {
634 if (net_xmit_drop_count(rc))
635 qdisc_qstats_drop(sch);
636 } else {
637 nb++;
638 len += last_len;
639 }
640 segs = skb2;
641 }
642 /* Parent qdiscs accounted for 1 skb of size @prev_len */
643 qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
644 } else if (!skb) {
645 return NET_XMIT_DROP;
646 }
647 return NET_XMIT_SUCCESS;
648}
649
/* Delay the next round by scheduling a new future slot and refreshing
 * its packet and byte budgets.
652 */
653
654static void get_slot_next(struct netem_sched_data *q, u64 now)
655{
656 s64 next_delay;
657
658 if (!q->slot_dist)
659 next_delay = q->slot_config.min_delay +
660 (get_random_u32() *
661 (q->slot_config.max_delay -
662 q->slot_config.min_delay) >> 32);
663 else
664 next_delay = tabledist(q->slot_config.dist_delay,
665 (s32)(q->slot_config.dist_jitter),
666 NULL, &q->prng, q->slot_dist);
667
668 q->slot.slot_next = now + next_delay;
669 q->slot.packets_left = q->slot_config.max_packets;
670 q->slot.bytes_left = q->slot_config.max_bytes;
671}
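
/* Example: the non-table branch above picks the next slot delay uniformly in
 * [min_delay, max_delay) with the usual 32-bit fixed-point trick: the random
 * u32 is treated as a fraction of 2^32 and multiplied by the span.  With
 * min_delay = 1,000,000 ns and max_delay = 3,000,000 ns, a draw of
 * 0x80000000 gives
 *
 *	1,000,000 + ((0x80000000ULL * 2,000,000) >> 32) = 2,000,000 ns
 */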
672
673static struct sk_buff *netem_peek(struct netem_sched_data *q)
674{
675 struct sk_buff *skb = skb_rb_first(&q->t_root);
676 u64 t1, t2;
677
678 if (!skb)
679 return q->t_head;
680 if (!q->t_head)
681 return skb;
682
683 t1 = netem_skb_cb(skb)->time_to_send;
684 t2 = netem_skb_cb(q->t_head)->time_to_send;
685 if (t1 < t2)
686 return skb;
687 return q->t_head;
688}
689
690static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
691{
692 if (skb == q->t_head) {
693 q->t_head = skb->next;
694 if (!q->t_head)
695 q->t_tail = NULL;
696 } else {
697 rb_erase(&skb->rbnode, &q->t_root);
698 }
699}
700
701static struct sk_buff *netem_dequeue(struct Qdisc *sch)
702{
703 struct netem_sched_data *q = qdisc_priv(sch);
704 struct sk_buff *skb;
705
706tfifo_dequeue:
707 skb = __qdisc_dequeue_head(&sch->q);
708 if (skb) {
709deliver:
710 qdisc_qstats_backlog_dec(sch, skb);
711 qdisc_bstats_update(sch, skb);
712 return skb;
713 }
714 skb = netem_peek(q);
715 if (skb) {
716 u64 time_to_send;
717 u64 now = ktime_get_ns();
718
		/* is there still time remaining before this skb is due? */
720 time_to_send = netem_skb_cb(skb)->time_to_send;
721 if (q->slot.slot_next && q->slot.slot_next < time_to_send)
722 get_slot_next(q, now);
723
724 if (time_to_send <= now && q->slot.slot_next <= now) {
725 netem_erase_head(q, skb);
726 q->t_len--;
727 skb->next = NULL;
728 skb->prev = NULL;
729 /* skb->dev shares skb->rbnode area,
730 * we need to restore its value.
731 */
732 skb->dev = qdisc_dev(sch);
733
734 if (q->slot.slot_next) {
735 q->slot.packets_left--;
736 q->slot.bytes_left -= qdisc_pkt_len(skb);
737 if (q->slot.packets_left <= 0 ||
738 q->slot.bytes_left <= 0)
739 get_slot_next(q, now);
740 }
741
742 if (q->qdisc) {
743 unsigned int pkt_len = qdisc_pkt_len(skb);
744 struct sk_buff *to_free = NULL;
745 int err;
746
747 err = qdisc_enqueue(skb, q->qdisc, &to_free);
748 kfree_skb_list(to_free);
749 if (err != NET_XMIT_SUCCESS) {
750 if (net_xmit_drop_count(err))
751 qdisc_qstats_drop(sch);
752 sch->qstats.backlog -= pkt_len;
753 sch->q.qlen--;
754 qdisc_tree_reduce_backlog(sch, 1, pkt_len);
755 }
756 goto tfifo_dequeue;
757 }
758 sch->q.qlen--;
759 goto deliver;
760 }
761
762 if (q->qdisc) {
763 skb = q->qdisc->ops->dequeue(q->qdisc);
764 if (skb) {
765 sch->q.qlen--;
766 goto deliver;
767 }
768 }
769
770 qdisc_watchdog_schedule_ns(&q->watchdog,
771 max(time_to_send,
772 q->slot.slot_next));
773 }
774
775 if (q->qdisc) {
776 skb = q->qdisc->ops->dequeue(q->qdisc);
777 if (skb) {
778 sch->q.qlen--;
779 goto deliver;
780 }
781 }
782 return NULL;
783}
784
785static void netem_reset(struct Qdisc *sch)
786{
787 struct netem_sched_data *q = qdisc_priv(sch);
788
789 qdisc_reset_queue(sch);
790 tfifo_reset(sch);
791 if (q->qdisc)
792 qdisc_reset(q->qdisc);
793 qdisc_watchdog_cancel(&q->watchdog);
794}
795
796static void dist_free(struct disttable *d)
797{
798 kvfree(d);
799}
800
801/*
802 * Distribution data is a variable size payload containing
803 * signed 16 bit values.
804 */
805
806static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
807{
808 size_t n = nla_len(attr)/sizeof(__s16);
809 const __s16 *data = nla_data(attr);
810 struct disttable *d;
811 int i;
812
813 if (!n || n > NETEM_DIST_MAX)
814 return -EINVAL;
815
816 d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
817 if (!d)
818 return -ENOMEM;
819
820 d->size = n;
821 for (i = 0; i < n; i++)
822 d->table[i] = data[i];
823
824 *tbl = d;
825 return 0;
826}
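
/* Example (sketch, not part of the kernel build): TCA_NETEM_DELAY_DIST and
 * TCA_NETEM_SLOT_DIST carry an array of signed 16 bit samples; the tables
 * shipped with iproute2 (normal, pareto, paretonormal, ...) are scaled so
 * that NETEM_DIST_SCALE corresponds to one standard deviation.  A user-space
 * sketch that fills such a table (n >= 2) with a uniform distribution of
 * unit variance, spanning +/- sqrt(3) sigma:
 *
 *	#include <math.h>
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static void fill_uniform_table(int16_t *table, size_t n)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < n; i++)
 *			table[i] = (int16_t)((2.0 * i / (n - 1) - 1.0) *
 *					     sqrt(3.0) * 8192.0 /* NETEM_DIST_SCALE */);
 *	}
 */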
827
828static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
829{
830 const struct tc_netem_slot *c = nla_data(attr);
831
832 q->slot_config = *c;
833 if (q->slot_config.max_packets == 0)
834 q->slot_config.max_packets = INT_MAX;
835 if (q->slot_config.max_bytes == 0)
836 q->slot_config.max_bytes = INT_MAX;
837
838 /* capping dist_jitter to the range acceptable by tabledist() */
839 q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
840
841 q->slot.packets_left = q->slot_config.max_packets;
842 q->slot.bytes_left = q->slot_config.max_bytes;
843 if (q->slot_config.min_delay | q->slot_config.max_delay |
844 q->slot_config.dist_jitter)
845 q->slot.slot_next = ktime_get_ns();
846 else
847 q->slot.slot_next = 0;
848}
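
/* Example: slotting emulates links that deliver packets in bursts (e.g.
 * WiFi/LTE aggregation or DOCSIS request/grant cycles).  With
 * min_delay = 10 ms, max_delay = 20 ms and max_bytes = 64,000, dequeue is
 * gated on q->slot.slot_next, so packets accumulate and are then released
 * in bursts of at most 64,000 bytes every 10-20 ms.
 */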
849
850static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
851{
852 const struct tc_netem_corr *c = nla_data(attr);
853
854 init_crandom(&q->delay_cor, c->delay_corr);
855 init_crandom(&q->loss_cor, c->loss_corr);
856 init_crandom(&q->dup_cor, c->dup_corr);
857}
858
859static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
860{
861 const struct tc_netem_reorder *r = nla_data(attr);
862
863 q->reorder = r->probability;
864 init_crandom(&q->reorder_cor, r->correlation);
865}
866
867static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
868{
869 const struct tc_netem_corrupt *r = nla_data(attr);
870
871 q->corrupt = r->probability;
872 init_crandom(&q->corrupt_cor, r->correlation);
873}
874
875static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
876{
877 const struct tc_netem_rate *r = nla_data(attr);
878
879 q->rate = r->rate;
880 q->packet_overhead = r->packet_overhead;
881 q->cell_size = r->cell_size;
882 q->cell_overhead = r->cell_overhead;
883 if (q->cell_size)
884 q->cell_size_reciprocal = reciprocal_value(q->cell_size);
885 else
886 q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
887}
888
889static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
890{
891 const struct nlattr *la;
892 int rem;
893
894 nla_for_each_nested(la, attr, rem) {
895 u16 type = nla_type(la);
896
897 switch (type) {
898 case NETEM_LOSS_GI: {
899 const struct tc_netem_gimodel *gi = nla_data(la);
900
901 if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
902 pr_info("netem: incorrect gi model size\n");
903 return -EINVAL;
904 }
905
906 q->loss_model = CLG_4_STATES;
907
908 q->clg.state = TX_IN_GAP_PERIOD;
909 q->clg.a1 = gi->p13;
910 q->clg.a2 = gi->p31;
911 q->clg.a3 = gi->p32;
912 q->clg.a4 = gi->p14;
913 q->clg.a5 = gi->p23;
914 break;
915 }
916
917 case NETEM_LOSS_GE: {
918 const struct tc_netem_gemodel *ge = nla_data(la);
919
920 if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
921 pr_info("netem: incorrect ge model size\n");
922 return -EINVAL;
923 }
924
925 q->loss_model = CLG_GILB_ELL;
926 q->clg.state = GOOD_STATE;
927 q->clg.a1 = ge->p;
928 q->clg.a2 = ge->r;
929 q->clg.a3 = ge->h;
930 q->clg.a4 = ge->k1;
931 break;
932 }
933
934 default:
935 pr_info("netem: unknown loss type %u\n", type);
936 return -EINVAL;
937 }
938 }
939
940 return 0;
941}
942
943static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
944 [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) },
945 [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
946 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
947 [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
948 [TCA_NETEM_LOSS] = { .type = NLA_NESTED },
949 [TCA_NETEM_ECN] = { .type = NLA_U32 },
950 [TCA_NETEM_RATE64] = { .type = NLA_U64 },
951 [TCA_NETEM_LATENCY64] = { .type = NLA_S64 },
952 [TCA_NETEM_JITTER64] = { .type = NLA_S64 },
953 [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) },
954 [TCA_NETEM_PRNG_SEED] = { .type = NLA_U64 },
955};
956
957static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
958 const struct nla_policy *policy, int len)
959{
960 int nested_len = nla_len(nla) - NLA_ALIGN(len);
961
962 if (nested_len < 0) {
963 pr_info("netem: invalid attributes len %d\n", nested_len);
964 return -EINVAL;
965 }
966
967 if (nested_len >= nla_attr_size(0))
968 return nla_parse_deprecated(tb, maxtype,
969 nla_data(nla) + NLA_ALIGN(len),
970 nested_len, policy, NULL);
971
972 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
973 return 0;
974}
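
/* Example: netem's TCA_OPTIONS is not a plain nest; it starts with a
 * struct tc_netem_qopt and only then carries ordinary netlink attributes,
 * which is why parse_attr() skips NLA_ALIGN(sizeof(*qopt)) bytes first:
 *
 *	+----------------------+------------------+------------------+----
 *	| struct tc_netem_qopt | TCA_NETEM_CORR   | TCA_NETEM_LOSS   | ...
 *	+----------------------+------------------+------------------+----
 *	^ nla_data(opt)        ^ parsed by nla_parse_deprecated()
 */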
975
976/* Parse netlink message to set options */
977static int netem_change(struct Qdisc *sch, struct nlattr *opt,
978 struct netlink_ext_ack *extack)
979{
980 struct netem_sched_data *q = qdisc_priv(sch);
981 struct nlattr *tb[TCA_NETEM_MAX + 1];
982 struct disttable *delay_dist = NULL;
983 struct disttable *slot_dist = NULL;
984 struct tc_netem_qopt *qopt;
985 struct clgstate old_clg;
986 int old_loss_model = CLG_RANDOM;
987 int ret;
988
989 qopt = nla_data(opt);
990 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
991 if (ret < 0)
992 return ret;
993
994 if (tb[TCA_NETEM_DELAY_DIST]) {
995 ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
996 if (ret)
997 goto table_free;
998 }
999
1000 if (tb[TCA_NETEM_SLOT_DIST]) {
1001 ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
1002 if (ret)
1003 goto table_free;
1004 }
1005
1006 sch_tree_lock(sch);
1007 /* backup q->clg and q->loss_model */
1008 old_clg = q->clg;
1009 old_loss_model = q->loss_model;
1010
1011 if (tb[TCA_NETEM_LOSS]) {
1012 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
1013 if (ret) {
1014 q->loss_model = old_loss_model;
1015 q->clg = old_clg;
1016 goto unlock;
1017 }
1018 } else {
1019 q->loss_model = CLG_RANDOM;
1020 }
1021
1022 if (delay_dist)
1023 swap(q->delay_dist, delay_dist);
1024 if (slot_dist)
1025 swap(q->slot_dist, slot_dist);
1026 sch->limit = qopt->limit;
1027
1028 q->latency = PSCHED_TICKS2NS(qopt->latency);
1029 q->jitter = PSCHED_TICKS2NS(qopt->jitter);
1030 q->limit = qopt->limit;
1031 q->gap = qopt->gap;
1032 q->counter = 0;
1033 q->loss = qopt->loss;
1034 q->duplicate = qopt->duplicate;
1035
1036 /* for compatibility with earlier versions.
1037 * if gap is set, need to assume 100% probability
1038 */
1039 if (q->gap)
1040 q->reorder = ~0;
1041
1042 if (tb[TCA_NETEM_CORR])
1043 get_correlation(q, tb[TCA_NETEM_CORR]);
1044
1045 if (tb[TCA_NETEM_REORDER])
1046 get_reorder(q, tb[TCA_NETEM_REORDER]);
1047
1048 if (tb[TCA_NETEM_CORRUPT])
1049 get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
1050
1051 if (tb[TCA_NETEM_RATE])
1052 get_rate(q, tb[TCA_NETEM_RATE]);
1053
1054 if (tb[TCA_NETEM_RATE64])
1055 q->rate = max_t(u64, q->rate,
1056 nla_get_u64(tb[TCA_NETEM_RATE64]));
1057
1058 if (tb[TCA_NETEM_LATENCY64])
1059 q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
1060
1061 if (tb[TCA_NETEM_JITTER64])
1062 q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
1063
1064 if (tb[TCA_NETEM_ECN])
1065 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
1066
1067 if (tb[TCA_NETEM_SLOT])
1068 get_slot(q, tb[TCA_NETEM_SLOT]);
1069
1070 /* capping jitter to the range acceptable by tabledist() */
1071 q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
1072
1073 if (tb[TCA_NETEM_PRNG_SEED])
1074 q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
1075 else
1076 q->prng.seed = get_random_u64();
1077 prandom_seed_state(&q->prng.prng_state, q->prng.seed);
1078
1079unlock:
1080 sch_tree_unlock(sch);
1081
1082table_free:
1083 dist_free(delay_dist);
1084 dist_free(slot_dist);
1085 return ret;
1086}
1087
1088static int netem_init(struct Qdisc *sch, struct nlattr *opt,
1089 struct netlink_ext_ack *extack)
1090{
1091 struct netem_sched_data *q = qdisc_priv(sch);
1092 int ret;
1093
1094 qdisc_watchdog_init(&q->watchdog, sch);
1095
1096 if (!opt)
1097 return -EINVAL;
1098
1099 q->loss_model = CLG_RANDOM;
1100 ret = netem_change(sch, opt, extack);
1101 if (ret)
1102 pr_info("netem: change failed\n");
1103 return ret;
1104}
1105
1106static void netem_destroy(struct Qdisc *sch)
1107{
1108 struct netem_sched_data *q = qdisc_priv(sch);
1109
1110 qdisc_watchdog_cancel(&q->watchdog);
1111 if (q->qdisc)
1112 qdisc_put(q->qdisc);
1113 dist_free(q->delay_dist);
1114 dist_free(q->slot_dist);
1115}
1116
1117static int dump_loss_model(const struct netem_sched_data *q,
1118 struct sk_buff *skb)
1119{
1120 struct nlattr *nest;
1121
1122 nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
1123 if (nest == NULL)
1124 goto nla_put_failure;
1125
1126 switch (q->loss_model) {
1127 case CLG_RANDOM:
1128 /* legacy loss model */
1129 nla_nest_cancel(skb, nest);
1130 return 0; /* no data */
1131
1132 case CLG_4_STATES: {
1133 struct tc_netem_gimodel gi = {
1134 .p13 = q->clg.a1,
1135 .p31 = q->clg.a2,
1136 .p32 = q->clg.a3,
1137 .p14 = q->clg.a4,
1138 .p23 = q->clg.a5,
1139 };
1140
1141 if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
1142 goto nla_put_failure;
1143 break;
1144 }
1145 case CLG_GILB_ELL: {
1146 struct tc_netem_gemodel ge = {
1147 .p = q->clg.a1,
1148 .r = q->clg.a2,
1149 .h = q->clg.a3,
1150 .k1 = q->clg.a4,
1151 };
1152
1153 if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
1154 goto nla_put_failure;
1155 break;
1156 }
1157 }
1158
1159 nla_nest_end(skb, nest);
1160 return 0;
1161
1162nla_put_failure:
1163 nla_nest_cancel(skb, nest);
1164 return -1;
1165}
1166
1167static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
1168{
1169 const struct netem_sched_data *q = qdisc_priv(sch);
1170 struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
1171 struct tc_netem_qopt qopt;
1172 struct tc_netem_corr cor;
1173 struct tc_netem_reorder reorder;
1174 struct tc_netem_corrupt corrupt;
1175 struct tc_netem_rate rate;
1176 struct tc_netem_slot slot;
1177
1178 qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
1179 UINT_MAX);
1180 qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
1181 UINT_MAX);
1182 qopt.limit = q->limit;
1183 qopt.loss = q->loss;
1184 qopt.gap = q->gap;
1185 qopt.duplicate = q->duplicate;
1186 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1187 goto nla_put_failure;
1188
1189 if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
1190 goto nla_put_failure;
1191
1192 if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
1193 goto nla_put_failure;
1194
1195 cor.delay_corr = q->delay_cor.rho;
1196 cor.loss_corr = q->loss_cor.rho;
1197 cor.dup_corr = q->dup_cor.rho;
1198 if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1199 goto nla_put_failure;
1200
1201 reorder.probability = q->reorder;
1202 reorder.correlation = q->reorder_cor.rho;
1203 if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1204 goto nla_put_failure;
1205
1206 corrupt.probability = q->corrupt;
1207 corrupt.correlation = q->corrupt_cor.rho;
1208 if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1209 goto nla_put_failure;
1210
1211 if (q->rate >= (1ULL << 32)) {
1212 if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
1213 TCA_NETEM_PAD))
1214 goto nla_put_failure;
1215 rate.rate = ~0U;
1216 } else {
1217 rate.rate = q->rate;
1218 }
1219 rate.packet_overhead = q->packet_overhead;
1220 rate.cell_size = q->cell_size;
1221 rate.cell_overhead = q->cell_overhead;
1222 if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1223 goto nla_put_failure;
1224
1225 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1226 goto nla_put_failure;
1227
1228 if (dump_loss_model(q, skb) != 0)
1229 goto nla_put_failure;
1230
1231 if (q->slot_config.min_delay | q->slot_config.max_delay |
1232 q->slot_config.dist_jitter) {
1233 slot = q->slot_config;
1234 if (slot.max_packets == INT_MAX)
1235 slot.max_packets = 0;
1236 if (slot.max_bytes == INT_MAX)
1237 slot.max_bytes = 0;
1238 if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
1239 goto nla_put_failure;
1240 }
1241
1242 if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
1243 TCA_NETEM_PAD))
1244 goto nla_put_failure;
1245
1246 return nla_nest_end(skb, nla);
1247
1248nla_put_failure:
1249 nlmsg_trim(skb, nla);
1250 return -1;
1251}
1252
1253static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
1254 struct sk_buff *skb, struct tcmsg *tcm)
1255{
1256 struct netem_sched_data *q = qdisc_priv(sch);
1257
1258 if (cl != 1 || !q->qdisc) /* only one class */
1259 return -ENOENT;
1260
1261 tcm->tcm_handle |= TC_H_MIN(1);
1262 tcm->tcm_info = q->qdisc->handle;
1263
1264 return 0;
1265}
1266
1267static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1268 struct Qdisc **old, struct netlink_ext_ack *extack)
1269{
1270 struct netem_sched_data *q = qdisc_priv(sch);
1271
1272 *old = qdisc_replace(sch, new, &q->qdisc);
1273 return 0;
1274}
1275
1276static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
1277{
1278 struct netem_sched_data *q = qdisc_priv(sch);
1279 return q->qdisc;
1280}
1281
1282static unsigned long netem_find(struct Qdisc *sch, u32 classid)
1283{
1284 return 1;
1285}
1286
1287static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
1288{
1289 if (!walker->stop) {
1290 if (!tc_qdisc_stats_dump(sch, 1, walker))
1291 return;
1292 }
1293}
1294
1295static const struct Qdisc_class_ops netem_class_ops = {
1296 .graft = netem_graft,
1297 .leaf = netem_leaf,
1298 .find = netem_find,
1299 .walk = netem_walk,
1300 .dump = netem_dump_class,
1301};
1302
1303static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1304 .id = "netem",
1305 .cl_ops = &netem_class_ops,
1306 .priv_size = sizeof(struct netem_sched_data),
1307 .enqueue = netem_enqueue,
1308 .dequeue = netem_dequeue,
1309 .peek = qdisc_peek_dequeued,
1310 .init = netem_init,
1311 .reset = netem_reset,
1312 .destroy = netem_destroy,
1313 .change = netem_change,
1314 .dump = netem_dump,
1315 .owner = THIS_MODULE,
1316};
1317MODULE_ALIAS_NET_SCH("netem");
1318
1319
1320static int __init netem_module_init(void)
1321{
1322 pr_info("netem: version " VERSION "\n");
1323 return register_qdisc(&netem_qdisc_ops);
1324}
1325static void __exit netem_module_exit(void)
1326{
1327 unregister_qdisc(&netem_qdisc_ops);
1328}
1329module_init(netem_module_init)
1330module_exit(netem_module_exit)
1331MODULE_LICENSE("GPL");
1332MODULE_DESCRIPTION("Network characteristics emulator qdisc");
1/*
2 * net/sched/sch_netem.c Network emulator
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License.
8 *
9 * Many of the algorithms and ideas for this came from
10 * NIST Net which is not copyrighted.
11 *
12 * Authors: Stephen Hemminger <shemminger@osdl.org>
13 * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
14 */
15
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/skbuff.h>
23#include <linux/vmalloc.h>
24#include <linux/rtnetlink.h>
25#include <linux/reciprocal_div.h>
26#include <linux/rbtree.h>
27
28#include <net/netlink.h>
29#include <net/pkt_sched.h>
30#include <net/inet_ecn.h>
31
32#define VERSION "1.3"
33
34/* Network Emulation Queuing algorithm.
35 ====================================
36
37 Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
38 Network Emulation Tool
39 [2] Luigi Rizzo, DummyNet for FreeBSD
40
41 ----------------------------------------------------------------
42
43 This started out as a simple way to delay outgoing packets to
44 test TCP but has grown to include most of the functionality
45 of a full blown network emulator like NISTnet. It can delay
46 packets and add random jitter (and correlation). The random
47 distribution can be loaded from a table as well to provide
48 normal, Pareto, or experimental curves. Packet loss,
49 duplication, and reordering can also be emulated.
50
51 This qdisc does not do classification that can be handled in
52 layering other disciplines. It does not need to do bandwidth
53 control either since that can be handled by using token
54 bucket or other rate control.
55
56 Correlated Loss Generator models
57
58 Added generation of correlated loss according to the
59 "Gilbert-Elliot" model, a 4-state markov model.
60
61 References:
62 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
63 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
64 and intuitive loss model for packet networks and its implementation
65 in the Netem module in the Linux kernel", available in [1]
66
67 Authors: Stefano Salsano <stefano.salsano at uniroma2.it
68 Fabio Ludovici <fabio.ludovici at yahoo.it>
69*/
70
71struct netem_sched_data {
72 /* internal t(ime)fifo qdisc uses t_root and sch->limit */
73 struct rb_root t_root;
74
75 /* optional qdisc for classful handling (NULL at netem init) */
76 struct Qdisc *qdisc;
77
78 struct qdisc_watchdog watchdog;
79
80 s64 latency;
81 s64 jitter;
82
83 u32 loss;
84 u32 ecn;
85 u32 limit;
86 u32 counter;
87 u32 gap;
88 u32 duplicate;
89 u32 reorder;
90 u32 corrupt;
91 u64 rate;
92 s32 packet_overhead;
93 u32 cell_size;
94 struct reciprocal_value cell_size_reciprocal;
95 s32 cell_overhead;
96
97 struct crndstate {
98 u32 last;
99 u32 rho;
100 } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
101
102 struct disttable {
103 u32 size;
104 s16 table[0];
105 } *delay_dist;
106
107 enum {
108 CLG_RANDOM,
109 CLG_4_STATES,
110 CLG_GILB_ELL,
111 } loss_model;
112
113 enum {
114 TX_IN_GAP_PERIOD = 1,
115 TX_IN_BURST_PERIOD,
116 LOST_IN_GAP_PERIOD,
117 LOST_IN_BURST_PERIOD,
118 } _4_state_model;
119
120 enum {
121 GOOD_STATE = 1,
122 BAD_STATE,
123 } GE_state_model;
124
125 /* Correlated Loss Generation models */
126 struct clgstate {
127 /* state of the Markov chain */
128 u8 state;
129
130 /* 4-states and Gilbert-Elliot models */
131 u32 a1; /* p13 for 4-states or p for GE */
132 u32 a2; /* p31 for 4-states or r for GE */
133 u32 a3; /* p32 for 4-states or h for GE */
134 u32 a4; /* p14 for 4-states or 1-k for GE */
135 u32 a5; /* p23 used only in 4-states */
136 } clg;
137
138 struct tc_netem_slot slot_config;
139 struct slotstate {
140 u64 slot_next;
141 s32 packets_left;
142 s32 bytes_left;
143 } slot;
144
145};
146
147/* Time stamp put into socket buffer control block
148 * Only valid when skbs are in our internal t(ime)fifo queue.
149 *
150 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
151 * and skb->next & skb->prev are scratch space for a qdisc,
152 * we save skb->tstamp value in skb->cb[] before destroying it.
153 */
154struct netem_skb_cb {
155 u64 time_to_send;
156};
157
158static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
159{
160 /* we assume we can use skb next/prev/tstamp as storage for rb_node */
161 qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
162 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
163}
164
165/* init_crandom - initialize correlated random number generator
166 * Use entropy source for initial seed.
167 */
168static void init_crandom(struct crndstate *state, unsigned long rho)
169{
170 state->rho = rho;
171 state->last = prandom_u32();
172}
173
174/* get_crandom - correlated random number generator
175 * Next number depends on last value.
176 * rho is scaled to avoid floating point.
177 */
178static u32 get_crandom(struct crndstate *state)
179{
180 u64 value, rho;
181 unsigned long answer;
182
183 if (state->rho == 0) /* no correlation */
184 return prandom_u32();
185
186 value = prandom_u32();
187 rho = (u64)state->rho + 1;
188 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
189 state->last = answer;
190 return answer;
191}
192
193/* loss_4state - 4-state model loss generator
194 * Generates losses according to the 4-state Markov chain adopted in
195 * the GI (General and Intuitive) loss model.
196 */
197static bool loss_4state(struct netem_sched_data *q)
198{
199 struct clgstate *clg = &q->clg;
200 u32 rnd = prandom_u32();
201
202 /*
203 * Makes a comparison between rnd and the transition
204 * probabilities outgoing from the current state, then decides the
205 * next state and if the next packet has to be transmitted or lost.
206 * The four states correspond to:
207 * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
208 * LOST_IN_BURST_PERIOD => isolated losses within a gap period
209 * LOST_IN_GAP_PERIOD => lost packets within a burst period
210 * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period
211 */
212 switch (clg->state) {
213 case TX_IN_GAP_PERIOD:
214 if (rnd < clg->a4) {
215 clg->state = LOST_IN_BURST_PERIOD;
216 return true;
217 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
218 clg->state = LOST_IN_GAP_PERIOD;
219 return true;
220 } else if (clg->a1 + clg->a4 < rnd) {
221 clg->state = TX_IN_GAP_PERIOD;
222 }
223
224 break;
225 case TX_IN_BURST_PERIOD:
226 if (rnd < clg->a5) {
227 clg->state = LOST_IN_GAP_PERIOD;
228 return true;
229 } else {
230 clg->state = TX_IN_BURST_PERIOD;
231 }
232
233 break;
234 case LOST_IN_GAP_PERIOD:
235 if (rnd < clg->a3)
236 clg->state = TX_IN_BURST_PERIOD;
237 else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
238 clg->state = TX_IN_GAP_PERIOD;
239 } else if (clg->a2 + clg->a3 < rnd) {
240 clg->state = LOST_IN_GAP_PERIOD;
241 return true;
242 }
243 break;
244 case LOST_IN_BURST_PERIOD:
245 clg->state = TX_IN_GAP_PERIOD;
246 break;
247 }
248
249 return false;
250}
251
252/* loss_gilb_ell - Gilbert-Elliot model loss generator
253 * Generates losses according to the Gilbert-Elliot loss model or
254 * its special cases (Gilbert or Simple Gilbert)
255 *
256 * Makes a comparison between random number and the transition
257 * probabilities outgoing from the current state, then decides the
258 * next state. A second random number is extracted and the comparison
259 * with the loss probability of the current state decides if the next
260 * packet will be transmitted or lost.
261 */
262static bool loss_gilb_ell(struct netem_sched_data *q)
263{
264 struct clgstate *clg = &q->clg;
265
266 switch (clg->state) {
267 case GOOD_STATE:
268 if (prandom_u32() < clg->a1)
269 clg->state = BAD_STATE;
270 if (prandom_u32() < clg->a4)
271 return true;
272 break;
273 case BAD_STATE:
274 if (prandom_u32() < clg->a2)
275 clg->state = GOOD_STATE;
276 if (prandom_u32() > clg->a3)
277 return true;
278 }
279
280 return false;
281}
282
283static bool loss_event(struct netem_sched_data *q)
284{
285 switch (q->loss_model) {
286 case CLG_RANDOM:
287 /* Random packet drop 0 => none, ~0 => all */
288 return q->loss && q->loss >= get_crandom(&q->loss_cor);
289
290 case CLG_4_STATES:
291 /* 4state loss model algorithm (used also for GI model)
292 * Extracts a value from the markov 4 state loss generator,
293 * if it is 1 drops a packet and if needed writes the event in
294 * the kernel logs
295 */
296 return loss_4state(q);
297
298 case CLG_GILB_ELL:
299 /* Gilbert-Elliot loss model algorithm
300 * Extracts a value from the Gilbert-Elliot loss generator,
301 * if it is 1 drops a packet and if needed writes the event in
302 * the kernel logs
303 */
304 return loss_gilb_ell(q);
305 }
306
307 return false; /* not reached */
308}
309
310
311/* tabledist - return a pseudo-randomly distributed value with mean mu and
312 * std deviation sigma. Uses table lookup to approximate the desired
313 * distribution, and a uniformly-distributed pseudo-random source.
314 */
315static s64 tabledist(s64 mu, s32 sigma,
316 struct crndstate *state,
317 const struct disttable *dist)
318{
319 s64 x;
320 long t;
321 u32 rnd;
322
323 if (sigma == 0)
324 return mu;
325
326 rnd = get_crandom(state);
327
328 /* default uniform distribution */
329 if (dist == NULL)
330 return ((rnd % (2 * sigma)) + mu) - sigma;
331
332 t = dist->table[rnd % dist->size];
333 x = (sigma % NETEM_DIST_SCALE) * t;
334 if (x >= 0)
335 x += NETEM_DIST_SCALE/2;
336 else
337 x -= NETEM_DIST_SCALE/2;
338
339 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
340}
341
342static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
343{
344 len += q->packet_overhead;
345
346 if (q->cell_size) {
347 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
348
349 if (len > cells * q->cell_size) /* extra cell needed for remainder */
350 cells++;
351 len = cells * (q->cell_size + q->cell_overhead);
352 }
353
354 return div64_u64(len * NSEC_PER_SEC, q->rate);
355}
356
357static void tfifo_reset(struct Qdisc *sch)
358{
359 struct netem_sched_data *q = qdisc_priv(sch);
360 struct rb_node *p = rb_first(&q->t_root);
361
362 while (p) {
363 struct sk_buff *skb = rb_to_skb(p);
364
365 p = rb_next(p);
366 rb_erase(&skb->rbnode, &q->t_root);
367 rtnl_kfree_skbs(skb, skb);
368 }
369}
370
371static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
372{
373 struct netem_sched_data *q = qdisc_priv(sch);
374 u64 tnext = netem_skb_cb(nskb)->time_to_send;
375 struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
376
377 while (*p) {
378 struct sk_buff *skb;
379
380 parent = *p;
381 skb = rb_to_skb(parent);
382 if (tnext >= netem_skb_cb(skb)->time_to_send)
383 p = &parent->rb_right;
384 else
385 p = &parent->rb_left;
386 }
387 rb_link_node(&nskb->rbnode, parent, p);
388 rb_insert_color(&nskb->rbnode, &q->t_root);
389 sch->q.qlen++;
390}
391
392/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
393 * when we statistically choose to corrupt one, we instead segment it, returning
394 * the first packet to be corrupted, and re-enqueue the remaining frames
395 */
396static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
397 struct sk_buff **to_free)
398{
399 struct sk_buff *segs;
400 netdev_features_t features = netif_skb_features(skb);
401
402 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
403
404 if (IS_ERR_OR_NULL(segs)) {
405 qdisc_drop(skb, sch, to_free);
406 return NULL;
407 }
408 consume_skb(skb);
409 return segs;
410}
411
412static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
413{
414 skb->next = qh->head;
415
416 if (!qh->head)
417 qh->tail = skb;
418 qh->head = skb;
419 qh->qlen++;
420}
421
422/*
423 * Insert one skb into qdisc.
424 * Note: parent depends on return value to account for queue length.
425 * NET_XMIT_DROP: queue length didn't change.
426 * NET_XMIT_SUCCESS: one skb was queued.
427 */
428static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
429 struct sk_buff **to_free)
430{
431 struct netem_sched_data *q = qdisc_priv(sch);
432 /* We don't fill cb now as skb_unshare() may invalidate it */
433 struct netem_skb_cb *cb;
434 struct sk_buff *skb2;
435 struct sk_buff *segs = NULL;
436 unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
437 int nb = 0;
438 int count = 1;
439 int rc = NET_XMIT_SUCCESS;
440
441 /* Random duplication */
442 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
443 ++count;
444
445 /* Drop packet? */
446 if (loss_event(q)) {
447 if (q->ecn && INET_ECN_set_ce(skb))
448 qdisc_qstats_drop(sch); /* mark packet */
449 else
450 --count;
451 }
452 if (count == 0) {
453 qdisc_qstats_drop(sch);
454 __qdisc_drop(skb, to_free);
455 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
456 }
457
458 /* If a delay is expected, orphan the skb. (orphaning usually takes
459 * place at TX completion time, so _before_ the link transit delay)
460 */
461 if (q->latency || q->jitter || q->rate)
462 skb_orphan_partial(skb);
463
464 /*
465 * If we need to duplicate packet, then re-insert at top of the
466 * qdisc tree, since parent queuer expects that only one
467 * skb will be queued.
468 */
469 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
470 struct Qdisc *rootq = qdisc_root(sch);
471 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
472
473 q->duplicate = 0;
474 rootq->enqueue(skb2, rootq, to_free);
475 q->duplicate = dupsave;
476 }
477
478 /*
479 * Randomized packet corruption.
480 * Make copy if needed since we are modifying
481 * If packet is going to be hardware checksummed, then
482 * do it now in software before we mangle it.
483 */
484 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
485 if (skb_is_gso(skb)) {
486 segs = netem_segment(skb, sch, to_free);
487 if (!segs)
488 return NET_XMIT_DROP;
489 } else {
490 segs = skb;
491 }
492
493 skb = segs;
494 segs = segs->next;
495
496 skb = skb_unshare(skb, GFP_ATOMIC);
497 if (unlikely(!skb)) {
498 qdisc_qstats_drop(sch);
499 goto finish_segs;
500 }
501 if (skb->ip_summed == CHECKSUM_PARTIAL &&
502 skb_checksum_help(skb)) {
503 qdisc_drop(skb, sch, to_free);
504 goto finish_segs;
505 }
506
507 skb->data[prandom_u32() % skb_headlen(skb)] ^=
508 1<<(prandom_u32() % 8);
509 }
510
511 if (unlikely(sch->q.qlen >= sch->limit))
512 return qdisc_drop_all(skb, sch, to_free);
513
514 qdisc_qstats_backlog_inc(sch, skb);
515
516 cb = netem_skb_cb(skb);
517 if (q->gap == 0 || /* not doing reordering */
518 q->counter < q->gap - 1 || /* inside last reordering gap */
519 q->reorder < get_crandom(&q->reorder_cor)) {
520 u64 now;
521 s64 delay;
522
523 delay = tabledist(q->latency, q->jitter,
524 &q->delay_cor, q->delay_dist);
525
526 now = ktime_get_ns();
527
528 if (q->rate) {
529 struct netem_skb_cb *last = NULL;
530
531 if (sch->q.tail)
532 last = netem_skb_cb(sch->q.tail);
533 if (q->t_root.rb_node) {
534 struct sk_buff *t_skb;
535 struct netem_skb_cb *t_last;
536
537 t_skb = skb_rb_last(&q->t_root);
538 t_last = netem_skb_cb(t_skb);
539 if (!last ||
540 t_last->time_to_send > last->time_to_send) {
541 last = t_last;
542 }
543 }
544
545 if (last) {
546 /*
547 * Last packet in queue is reference point (now),
548 * calculate this time bonus and subtract
549 * from delay.
550 */
551 delay -= last->time_to_send - now;
552 delay = max_t(s64, 0, delay);
553 now = last->time_to_send;
554 }
555
556 delay += packet_time_ns(qdisc_pkt_len(skb), q);
557 }
558
559 cb->time_to_send = now + delay;
560 ++q->counter;
561 tfifo_enqueue(skb, sch);
562 } else {
563 /*
564 * Do re-ordering by putting one out of N packets at the front
565 * of the queue.
566 */
567 cb->time_to_send = ktime_get_ns();
568 q->counter = 0;
569
570 netem_enqueue_skb_head(&sch->q, skb);
571 sch->qstats.requeues++;
572 }
573
574finish_segs:
575 if (segs) {
576 while (segs) {
577 skb2 = segs->next;
578 segs->next = NULL;
579 qdisc_skb_cb(segs)->pkt_len = segs->len;
580 last_len = segs->len;
581 rc = qdisc_enqueue(segs, sch, to_free);
582 if (rc != NET_XMIT_SUCCESS) {
583 if (net_xmit_drop_count(rc))
584 qdisc_qstats_drop(sch);
585 } else {
586 nb++;
587 len += last_len;
588 }
589 segs = skb2;
590 }
591 sch->q.qlen += nb;
592 if (nb > 1)
593 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
594 }
595 return NET_XMIT_SUCCESS;
596}
597
598/* Delay the next round with a new future slot with a
599 * correct number of bytes and packets.
600 */
601
602static void get_slot_next(struct netem_sched_data *q, u64 now)
603{
604 q->slot.slot_next = now + q->slot_config.min_delay +
605 (prandom_u32() *
606 (q->slot_config.max_delay -
607 q->slot_config.min_delay) >> 32);
608 q->slot.packets_left = q->slot_config.max_packets;
609 q->slot.bytes_left = q->slot_config.max_bytes;
610}
611
612static struct sk_buff *netem_dequeue(struct Qdisc *sch)
613{
614 struct netem_sched_data *q = qdisc_priv(sch);
615 struct sk_buff *skb;
616 struct rb_node *p;
617
618tfifo_dequeue:
619 skb = __qdisc_dequeue_head(&sch->q);
620 if (skb) {
621 qdisc_qstats_backlog_dec(sch, skb);
622deliver:
623 qdisc_bstats_update(sch, skb);
624 return skb;
625 }
626 p = rb_first(&q->t_root);
627 if (p) {
628 u64 time_to_send;
629 u64 now = ktime_get_ns();
630
631 skb = rb_to_skb(p);
632
633 /* if more time remaining? */
634 time_to_send = netem_skb_cb(skb)->time_to_send;
635 if (q->slot.slot_next && q->slot.slot_next < time_to_send)
636 get_slot_next(q, now);
637
638 if (time_to_send <= now && q->slot.slot_next <= now) {
639 rb_erase(p, &q->t_root);
640 sch->q.qlen--;
641 qdisc_qstats_backlog_dec(sch, skb);
642 skb->next = NULL;
643 skb->prev = NULL;
644 /* skb->dev shares skb->rbnode area,
645 * we need to restore its value.
646 */
647 skb->dev = qdisc_dev(sch);
648
649#ifdef CONFIG_NET_CLS_ACT
650 /*
651 * If it's at ingress let's pretend the delay is
652 * from the network (tstamp will be updated).
653 */
654 if (skb->tc_redirected && skb->tc_from_ingress)
655 skb->tstamp = 0;
656#endif
657
658 if (q->slot.slot_next) {
659 q->slot.packets_left--;
660 q->slot.bytes_left -= qdisc_pkt_len(skb);
661 if (q->slot.packets_left <= 0 ||
662 q->slot.bytes_left <= 0)
663 get_slot_next(q, now);
664 }
665
666 if (q->qdisc) {
667 unsigned int pkt_len = qdisc_pkt_len(skb);
668 struct sk_buff *to_free = NULL;
669 int err;
670
671 err = qdisc_enqueue(skb, q->qdisc, &to_free);
672 kfree_skb_list(to_free);
673 if (err != NET_XMIT_SUCCESS &&
674 net_xmit_drop_count(err)) {
675 qdisc_qstats_drop(sch);
676 qdisc_tree_reduce_backlog(sch, 1,
677 pkt_len);
678 }
679 goto tfifo_dequeue;
680 }
681 goto deliver;
682 }
683
684 if (q->qdisc) {
685 skb = q->qdisc->ops->dequeue(q->qdisc);
686 if (skb)
687 goto deliver;
688 }
689
690 qdisc_watchdog_schedule_ns(&q->watchdog,
691 max(time_to_send,
692 q->slot.slot_next));
693 }
694
695 if (q->qdisc) {
696 skb = q->qdisc->ops->dequeue(q->qdisc);
697 if (skb)
698 goto deliver;
699 }
700 return NULL;
701}
702
703static void netem_reset(struct Qdisc *sch)
704{
705 struct netem_sched_data *q = qdisc_priv(sch);
706
707 qdisc_reset_queue(sch);
708 tfifo_reset(sch);
709 if (q->qdisc)
710 qdisc_reset(q->qdisc);
711 qdisc_watchdog_cancel(&q->watchdog);
712}
713
714static void dist_free(struct disttable *d)
715{
716 kvfree(d);
717}
718
719/*
720 * Distribution data is a variable size payload containing
721 * signed 16 bit values.
722 */
723
724static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
725{
726 struct netem_sched_data *q = qdisc_priv(sch);
727 size_t n = nla_len(attr)/sizeof(__s16);
728 const __s16 *data = nla_data(attr);
729 spinlock_t *root_lock;
730 struct disttable *d;
731 int i;
732
733 if (n > NETEM_DIST_MAX)
734 return -EINVAL;
735
736 d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
737 if (!d)
738 return -ENOMEM;
739
740 d->size = n;
741 for (i = 0; i < n; i++)
742 d->table[i] = data[i];
743
744 root_lock = qdisc_root_sleeping_lock(sch);
745
746 spin_lock_bh(root_lock);
747 swap(q->delay_dist, d);
748 spin_unlock_bh(root_lock);
749
750 dist_free(d);
751 return 0;
752}

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
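
/* Slots approximate bursty link-layer behaviour (e.g. frame aggregation):
 * packets are held back and released in bursts at slot boundaries. A zero
 * max_packets or max_bytes from userspace means "no per-slot cap", hence the
 * INT_MAX substitution above. Example (tc syntax; may vary by iproute2
 * version):
 *
 *	tc qdisc add dev eth0 root netem slot 800us 1ms packets 32 bytes 16k
 */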

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}
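
/* Correlation makes each new random sample depend partly on the previous
 * one. With tc, the trailing percentage supplies it, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 * adds 100ms +/- 10ms of delay where each sample is 25% correlated with the
 * last.
 */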

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}
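
/* Reordering requires a configured delay: packets that match the reorder
 * probability are sent immediately while the rest are delayed, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50%
 *
 * sends roughly 25% of packets at once (with 50% correlation), so they
 * overtake their delayed peers.
 */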

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}
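
/* Corruption flips a single random bit in the packet at the given
 * probability, e.g.:
 *
 *	tc qdisc add dev eth0 root netem corrupt 0.1%
 */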

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
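
/* Rate emulation delays each packet by its serialization time at the given
 * rate; packet_overhead, cell_size and cell_overhead let the effective size
 * account for link-layer framing such as ATM cells. Example (tc syntax; may
 * vary by iproute2 version):
 *
 *	tc qdisc add dev eth0 root netem rate 5mbit 20 100 5
 *
 * i.e. 5 Mbit/s with 20 bytes of per-packet overhead, 100-byte cells and 5
 * bytes of per-cell overhead.
 */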

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
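
/* The nested loss models correspond to the following tc invocations (see
 * tc-netem(8) for the authoritative syntax):
 *
 *	tc qdisc add dev eth0 root netem loss state 17% 19% 70% 0.1% 1%
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * "state" feeds the 4-state Markov model (p13 p31 p32 p23 p14) and
 * "gemodel" the Gilbert-Elliott model (p r 1-h 1-k).
 */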

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
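
/* netem's netlink layout predates nested attributes: TCA_OPTIONS starts
 * with a fixed struct tc_netem_qopt and any attributes follow it, so the
 * helper above parses the region past the legacy header instead of using
 * nla_parse_nested().
 */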

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			/* get_loss_clg() may have partially updated q->clg
			 * before failing; restore both fields
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case they
			 * were modified in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, assume 100% reorder probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}
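
/* All of the knobs handled above can be combined on one command line, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.5% \
 *		duplicate 1% corrupt 0.1% reorder 25% 50% rate 1mbit
 */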

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
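
/* The attributes emitted above are what "tc qdisc show dev eth0" decodes;
 * per-qdisc counters come from the generic stats path ("tc -s qdisc show").
 */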

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		= netem_graft,
	.leaf		= netem_leaf,
	.find		= netem_find,
	.walk		= netem_walk,
	.dump		= netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		= "netem",
	.cl_ops		= &netem_class_ops,
	.priv_size	= sizeof(struct netem_sched_data),
	.enqueue	= netem_enqueue,
	.dequeue	= netem_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= netem_init,
	.reset		= netem_reset,
	.destroy	= netem_destroy,
	.change		= netem_change,
	.dump		= netem_dump,
	.owner		= THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");