// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model and a 4-state Markov model (the GI model).

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
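
/* Example usage (illustrative only, assuming the standard iproute2 front
 * end; the device name is hypothetical):
 *
 *	# 100ms delay with 10ms jitter, 25% correlated with the last delay
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# add 0.3% random loss and 1% duplication on top
 *	tc qdisc change dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% duplicate 1%
 */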

struct disttable {
	u32  size;
	s16 table[] __counted_by(size);
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	u32 t_len;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct prng  {
		u64 seed;
		struct rnd_state prng_state;
	} prng;

	struct disttable *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state, struct prng *p)
{
	u64 value, rho;
	unsigned long answer;
	struct rnd_state *s = &p->prng_state;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32_state(s);

	value = prandom_u32_state(s);
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
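
/* Worked example of the fixed-point blend above: correlations are u32
 * fixed point where ~0U represents 1.0.  With rho = 0x80000000
 * (correlation 0.5), rho/2^32 is about 0.5, so
 *
 *	answer ~= 0.5 * value + 0.5 * state->last
 *
 * i.e. each output is pulled halfway toward the previous one, while
 * rho == 0 degenerates to plain prandom_u32_state() output.
 */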

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32_state(&q->prng.prng_state);

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
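
/* The a1..a5 transition probabilities compared against rnd above use the
 * same u32 fixed-point convention, ~0U being 1.0.  For instance, a p14 of
 * 1% would arrive from user space as roughly UINT_MAX / 100 (0x028F5C28),
 * so a packet in TX_IN_GAP_PERIOD suffers an isolated loss about once per
 * hundred packets.
 */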

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases  (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	struct rnd_state *s = &q->prng.prng_state;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32_state(s) < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32_state(s) < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32_state(s) < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32_state(s) > clg->a3)
			return true;
	}

	return false;
}
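
/* The special cases follow directly from the comparisons above: with
 * a2 (r) == 0 the chain never leaves BAD_STATE once entered, and with
 * a3 (h) == 0 nearly every packet seen in BAD_STATE is lost, which
 * reduces the model to the "Simple Gilbert" variant mentioned in the
 * header comment.
 */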

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * a true return from the Markov 4-state loss generator
		 * means the packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * a true return from the Gilbert-Elliot loss generator
		 * means the packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     struct prng *prng,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state, prng);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
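
/* Worked example: table entries are scaled so that t == NETEM_DIST_SCALE
 * (8192) corresponds to one standard deviation.  With mu = 100 ms (in ns),
 * sigma = 10 ms and a table entry t = 8192, the split computation above
 * yields approximately mu + sigma = 110 ms; t = -16384 yields roughly
 * mu - 2 * sigma = 80 ms.  Splitting sigma into its quotient and remainder
 * by NETEM_DIST_SCALE just keeps the intermediate product from overflowing.
 */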

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
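
/* Worked example: q->rate is in bytes per second.  A 1500 byte packet at
 * 12500000 B/s (100 Mbit/s) occupies 1500 * 1e9 / 12500000 = 120000 ns on
 * the emulated link.  With cell_size = 53 (ATM-style framing) the length
 * is first rounded up to ceil(1500 / 53) = 29 cells, i.e. 1537 bytes plus
 * any per-cell overhead, before the division.
 */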

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
	q->t_len = 0;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	q->t_len++;
	sch->q.qlen++;
}
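
/* Together the linear t_head/t_tail list and the t_root rbtree form one
 * logical queue ordered by time_to_send: in-order arrivals (the common
 * case when jitter is zero) take the O(1) tail append, out-of-order ones
 * go into the tree, and netem_peek() later in this file returns whichever
 * head is due first.
 */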

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2 = NULL;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate the packet, clone it before the
	 * original is modified.
	 */
	if (count > 1)
		skb2 = skb_clone(skb, GFP_ATOMIC);

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				goto finish_segs;

			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
			1<<get_random_u32_below(8);
	}

	if (unlikely(q->t_len >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		if (skb2)
			__qdisc_drop(skb2, to_free);
		return NET_XMIT_DROP;
	}

	/*
	 * If doing duplication then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (skb2) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		skb2 = NULL;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, &q->prng, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (skb2)
		__qdisc_drop(skb2, to_free);

	if (segs) {
		unsigned int len, last_len;
		int rc, nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}
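
/* Example of exercising the corruption and duplication paths above
 * (illustrative iproute2 invocation; the device name is hypothetical):
 *
 *	tc qdisc add dev eth0 root netem corrupt 0.1% duplicate 1%
 *
 * corrupt flips one random bit in the (software-checksummed) linear data;
 * duplicate re-enqueues the clone at the qdisc root so the copy traverses
 * the full discipline stack, with q->duplicate temporarily zeroed to keep
 * a duplicate from duplicating again.
 */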

/* Delay the next round by scheduling a new future slot with the
 * configured byte and packet budgets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(get_random_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, &q->prng, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
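
/* Example slot configuration (illustrative): with min_delay = 10 ms and
 * max_delay = 20 ms and no distribution table, the expression above picks
 * the next slot uniformly in [10 ms, 20 ms) via the usual
 * (u32 random * range) >> 32 trick, then permits at most
 * slot_config.max_packets and max_bytes of traffic until the next slot.
 */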

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
deliver:
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			q->t_len--;
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS) {
					if (net_xmit_drop_count(err))
						qdisc_qstats_drop(sch);
					sch->qstats.backlog -= pkt_len;
					sch->q.qlen--;
					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
				}
				goto tfifo_dequeue;
			}
			sch->q.qlen--;
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb) {
				sch->q.qlen--;
				goto deliver;
			}
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb) {
			sch->q.qlen--;
			goto deliver;
		}
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	*tbl = d;
	return 0;
}

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
	[TCA_NETEM_PRNG_SEED]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct disttable *delay_dist = NULL;
	struct disttable *slot_dist = NULL;
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto table_free;
	}

	sch_tree_lock(sch);
	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			q->clg = old_clg;
			goto unlock;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (delay_dist)
		swap(q->delay_dist, delay_dist);
	if (slot_dist)
		swap(q->slot_dist, slot_dist);
	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	if (tb[TCA_NETEM_PRNG_SEED])
		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
	else
		q->prng.seed = get_random_u64();
	prandom_seed_state(&q->prng.prng_state, q->prng.seed);

unlock:
	sch_tree_unlock(sch);

table_free:
	dist_free(delay_dist);
	dist_free(slot_dist);
	return ret;
}
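
/* Example (illustrative): supplying TCA_NETEM_PRNG_SEED, e.g. via the
 * "seed" option in recent iproute2 where available (invocation below is
 * hypothetical), makes the loss/delay sequences reproducible across runs:
 *
 *	tc qdisc add dev eth0 root netem loss 1% seed 12345
 *
 * otherwise a fresh random seed is drawn on every change, as above.
 */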

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
			      TCA_NETEM_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("netem");

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network characteristics emulator qdisc");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * net/sched/sch_netem.c	Network emulator
   4 *
   5 *  		Many of the algorithms and ideas for this came from
   6 *		NIST Net which is not copyrighted.
   7 *
   8 * Authors:	Stephen Hemminger <shemminger@osdl.org>
   9 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  10 */
  11
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/errno.h>
  18#include <linux/skbuff.h>
  19#include <linux/vmalloc.h>
 
  20#include <linux/rtnetlink.h>
  21#include <linux/reciprocal_div.h>
  22#include <linux/rbtree.h>
  23
  24#include <net/gso.h>
  25#include <net/netlink.h>
  26#include <net/pkt_sched.h>
  27#include <net/inet_ecn.h>
  28
  29#define VERSION "1.3"
  30
  31/*	Network Emulation Queuing algorithm.
  32	====================================
  33
  34	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
  35		 Network Emulation Tool
  36		 [2] Luigi Rizzo, DummyNet for FreeBSD
  37
  38	 ----------------------------------------------------------------
  39
  40	 This started out as a simple way to delay outgoing packets to
  41	 test TCP but has grown to include most of the functionality
  42	 of a full blown network emulator like NISTnet. It can delay
  43	 packets and add random jitter (and correlation). The random
  44	 distribution can be loaded from a table as well to provide
  45	 normal, Pareto, or experimental curves. Packet loss,
  46	 duplication, and reordering can also be emulated.
  47
  48	 This qdisc does not do classification that can be handled in
  49	 layering other disciplines.  It does not need to do bandwidth
  50	 control either since that can be handled by using token
  51	 bucket or other rate control.
  52
  53     Correlated Loss Generator models
  54
  55	Added generation of correlated loss according to the
  56	"Gilbert-Elliot" model, a 4-state markov model.
  57
  58	References:
  59	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  60	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  61	and intuitive loss model for packet networks and its implementation
  62	in the Netem module in the Linux kernel", available in [1]
  63
  64	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  65		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  66*/
  67
  68struct disttable {
  69	u32  size;
  70	s16 table[] __counted_by(size);
  71};
  72
  73struct netem_sched_data {
  74	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
  75	struct rb_root t_root;
  76
  77	/* a linear queue; reduces rbtree rebalancing when jitter is low */
  78	struct sk_buff	*t_head;
  79	struct sk_buff	*t_tail;
  80
 
 
  81	/* optional qdisc for classful handling (NULL at netem init) */
  82	struct Qdisc	*qdisc;
  83
  84	struct qdisc_watchdog watchdog;
  85
  86	s64 latency;
  87	s64 jitter;
  88
  89	u32 loss;
  90	u32 ecn;
  91	u32 limit;
  92	u32 counter;
  93	u32 gap;
  94	u32 duplicate;
  95	u32 reorder;
  96	u32 corrupt;
  97	u64 rate;
  98	s32 packet_overhead;
  99	u32 cell_size;
 100	struct reciprocal_value cell_size_reciprocal;
 101	s32 cell_overhead;
 102
 103	struct crndstate {
 104		u32 last;
 105		u32 rho;
 106	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 107
 108	struct prng  {
 109		u64 seed;
 110		struct rnd_state prng_state;
 111	} prng;
 112
 113	struct disttable *delay_dist;
 114
 115	enum  {
 116		CLG_RANDOM,
 117		CLG_4_STATES,
 118		CLG_GILB_ELL,
 119	} loss_model;
 120
 121	enum {
 122		TX_IN_GAP_PERIOD = 1,
 123		TX_IN_BURST_PERIOD,
 124		LOST_IN_GAP_PERIOD,
 125		LOST_IN_BURST_PERIOD,
 126	} _4_state_model;
 127
 128	enum {
 129		GOOD_STATE = 1,
 130		BAD_STATE,
 131	} GE_state_model;
 132
 133	/* Correlated Loss Generation models */
 134	struct clgstate {
 135		/* state of the Markov chain */
 136		u8 state;
 137
 138		/* 4-states and Gilbert-Elliot models */
 139		u32 a1;	/* p13 for 4-states or p for GE */
 140		u32 a2;	/* p31 for 4-states or r for GE */
 141		u32 a3;	/* p32 for 4-states or h for GE */
 142		u32 a4;	/* p14 for 4-states or 1-k for GE */
 143		u32 a5; /* p23 used only in 4-states */
 144	} clg;
 145
 146	struct tc_netem_slot slot_config;
 147	struct slotstate {
 148		u64 slot_next;
 149		s32 packets_left;
 150		s32 bytes_left;
 151	} slot;
 152
 153	struct disttable *slot_dist;
 154};
 155
 156/* Time stamp put into socket buffer control block
 157 * Only valid when skbs are in our internal t(ime)fifo queue.
 158 *
 159 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
 160 * and skb->next & skb->prev are scratch space for a qdisc,
 161 * we save skb->tstamp value in skb->cb[] before destroying it.
 162 */
 163struct netem_skb_cb {
 164	u64	        time_to_send;
 165};
 166
 167static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 168{
 169	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
 170	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 171	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 172}
 173
 174/* init_crandom - initialize correlated random number generator
 175 * Use entropy source for initial seed.
 176 */
 177static void init_crandom(struct crndstate *state, unsigned long rho)
 178{
 179	state->rho = rho;
 180	state->last = get_random_u32();
 181}
 182
 183/* get_crandom - correlated random number generator
 184 * Next number depends on last value.
 185 * rho is scaled to avoid floating point.
 186 */
 187static u32 get_crandom(struct crndstate *state, struct prng *p)
 188{
 189	u64 value, rho;
 190	unsigned long answer;
 191	struct rnd_state *s = &p->prng_state;
 192
 193	if (!state || state->rho == 0)	/* no correlation */
 194		return prandom_u32_state(s);
 195
 196	value = prandom_u32_state(s);
 197	rho = (u64)state->rho + 1;
 198	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 199	state->last = answer;
 200	return answer;
 201}
 202
 203/* loss_4state - 4-state model loss generator
 204 * Generates losses according to the 4-state Markov chain adopted in
 205 * the GI (General and Intuitive) loss model.
 206 */
 207static bool loss_4state(struct netem_sched_data *q)
 208{
 209	struct clgstate *clg = &q->clg;
 210	u32 rnd = prandom_u32_state(&q->prng.prng_state);
 211
 212	/*
 213	 * Makes a comparison between rnd and the transition
 214	 * probabilities outgoing from the current state, then decides the
 215	 * next state and if the next packet has to be transmitted or lost.
 216	 * The four states correspond to:
 217	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
 218	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
 219	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
 220	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
 221	 */
 222	switch (clg->state) {
 223	case TX_IN_GAP_PERIOD:
 224		if (rnd < clg->a4) {
 225			clg->state = LOST_IN_GAP_PERIOD;
 226			return true;
 227		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 228			clg->state = LOST_IN_BURST_PERIOD;
 229			return true;
 230		} else if (clg->a1 + clg->a4 < rnd) {
 231			clg->state = TX_IN_GAP_PERIOD;
 232		}
 233
 234		break;
 235	case TX_IN_BURST_PERIOD:
 236		if (rnd < clg->a5) {
 237			clg->state = LOST_IN_BURST_PERIOD;
 238			return true;
 239		} else {
 240			clg->state = TX_IN_BURST_PERIOD;
 241		}
 242
 243		break;
 244	case LOST_IN_BURST_PERIOD:
 245		if (rnd < clg->a3)
 246			clg->state = TX_IN_BURST_PERIOD;
 247		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 248			clg->state = TX_IN_GAP_PERIOD;
 249		} else if (clg->a2 + clg->a3 < rnd) {
 250			clg->state = LOST_IN_BURST_PERIOD;
 251			return true;
 252		}
 253		break;
 254	case LOST_IN_GAP_PERIOD:
 255		clg->state = TX_IN_GAP_PERIOD;
 256		break;
 257	}
 258
 259	return false;
 260}
 261
 262/* loss_gilb_ell - Gilbert-Elliot model loss generator
 263 * Generates losses according to the Gilbert-Elliot loss model or
 264 * its special cases  (Gilbert or Simple Gilbert)
 265 *
 266 * Makes a comparison between random number and the transition
 267 * probabilities outgoing from the current state, then decides the
 268 * next state. A second random number is extracted and the comparison
 269 * with the loss probability of the current state decides if the next
 270 * packet will be transmitted or lost.
 271 */
 272static bool loss_gilb_ell(struct netem_sched_data *q)
 273{
 274	struct clgstate *clg = &q->clg;
 275	struct rnd_state *s = &q->prng.prng_state;
 276
 277	switch (clg->state) {
 278	case GOOD_STATE:
 279		if (prandom_u32_state(s) < clg->a1)
 280			clg->state = BAD_STATE;
 281		if (prandom_u32_state(s) < clg->a4)
 282			return true;
 283		break;
 284	case BAD_STATE:
 285		if (prandom_u32_state(s) < clg->a2)
 286			clg->state = GOOD_STATE;
 287		if (prandom_u32_state(s) > clg->a3)
 288			return true;
 289	}
 290
 291	return false;
 292}
 293
 294static bool loss_event(struct netem_sched_data *q)
 295{
 296	switch (q->loss_model) {
 297	case CLG_RANDOM:
 298		/* Random packet drop 0 => none, ~0 => all */
 299		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
 300
 301	case CLG_4_STATES:
 302		/* 4state loss model algorithm (used also for GI model)
 303		* Extracts a value from the markov 4 state loss generator,
 304		* if it is 1 drops a packet and if needed writes the event in
 305		* the kernel logs
 306		*/
 307		return loss_4state(q);
 308
 309	case CLG_GILB_ELL:
 310		/* Gilbert-Elliot loss model algorithm
 311		* Extracts a value from the Gilbert-Elliot loss generator,
 312		* if it is 1 drops a packet and if needed writes the event in
 313		* the kernel logs
 314		*/
 315		return loss_gilb_ell(q);
 316	}
 317
 318	return false;	/* not reached */
 319}
 320
 321
 322/* tabledist - return a pseudo-randomly distributed value with mean mu and
 323 * std deviation sigma.  Uses table lookup to approximate the desired
 324 * distribution, and a uniformly-distributed pseudo-random source.
 325 */
 326static s64 tabledist(s64 mu, s32 sigma,
 327		     struct crndstate *state,
 328		     struct prng *prng,
 329		     const struct disttable *dist)
 330{
 331	s64 x;
 332	long t;
 333	u32 rnd;
 334
 335	if (sigma == 0)
 336		return mu;
 337
 338	rnd = get_crandom(state, prng);
 339
 340	/* default uniform distribution */
 341	if (dist == NULL)
 342		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
 343
 344	t = dist->table[rnd % dist->size];
 345	x = (sigma % NETEM_DIST_SCALE) * t;
 346	if (x >= 0)
 347		x += NETEM_DIST_SCALE/2;
 348	else
 349		x -= NETEM_DIST_SCALE/2;
 350
 351	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 352}
 353
 354static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
 355{
 356	len += q->packet_overhead;
 357
 358	if (q->cell_size) {
 359		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 360
 361		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 362			cells++;
 363		len = cells * (q->cell_size + q->cell_overhead);
 364	}
 365
 366	return div64_u64(len * NSEC_PER_SEC, q->rate);
 367}
 368
 369static void tfifo_reset(struct Qdisc *sch)
 370{
 371	struct netem_sched_data *q = qdisc_priv(sch);
 372	struct rb_node *p = rb_first(&q->t_root);
 373
 374	while (p) {
 375		struct sk_buff *skb = rb_to_skb(p);
 376
 377		p = rb_next(p);
 378		rb_erase(&skb->rbnode, &q->t_root);
 379		rtnl_kfree_skbs(skb, skb);
 380	}
 381
 382	rtnl_kfree_skbs(q->t_head, q->t_tail);
 383	q->t_head = NULL;
 384	q->t_tail = NULL;
 
 385}
 386
 387static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 388{
 389	struct netem_sched_data *q = qdisc_priv(sch);
 390	u64 tnext = netem_skb_cb(nskb)->time_to_send;
 391
 392	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
 393		if (q->t_tail)
 394			q->t_tail->next = nskb;
 395		else
 396			q->t_head = nskb;
 397		q->t_tail = nskb;
 398	} else {
 399		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 400
 401		while (*p) {
 402			struct sk_buff *skb;
 403
 404			parent = *p;
 405			skb = rb_to_skb(parent);
 406			if (tnext >= netem_skb_cb(skb)->time_to_send)
 407				p = &parent->rb_right;
 408			else
 409				p = &parent->rb_left;
 410		}
 411		rb_link_node(&nskb->rbnode, parent, p);
 412		rb_insert_color(&nskb->rbnode, &q->t_root);
 413	}
 
 414	sch->q.qlen++;
 415}
 416
 417/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 418 * when we statistically choose to corrupt one, we instead segment it, returning
 419 * the first packet to be corrupted, and re-enqueue the remaining frames
 420 */
 421static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
 422				     struct sk_buff **to_free)
 423{
 424	struct sk_buff *segs;
 425	netdev_features_t features = netif_skb_features(skb);
 426
 427	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 428
 429	if (IS_ERR_OR_NULL(segs)) {
 430		qdisc_drop(skb, sch, to_free);
 431		return NULL;
 432	}
 433	consume_skb(skb);
 434	return segs;
 435}
 436
 437/*
 438 * Insert one skb into qdisc.
 439 * Note: parent depends on return value to account for queue length.
 440 * 	NET_XMIT_DROP: queue length didn't change.
 441 *      NET_XMIT_SUCCESS: one skb was queued.
 442 */
 443static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 444			 struct sk_buff **to_free)
 445{
 446	struct netem_sched_data *q = qdisc_priv(sch);
 447	/* We don't fill cb now as skb_unshare() may invalidate it */
 448	struct netem_skb_cb *cb;
 449	struct sk_buff *skb2;
 450	struct sk_buff *segs = NULL;
 451	unsigned int prev_len = qdisc_pkt_len(skb);
 452	int count = 1;
 453	int rc = NET_XMIT_SUCCESS;
 454	int rc_drop = NET_XMIT_DROP;
 455
 456	/* Do not fool qdisc_drop_all() */
 457	skb->prev = NULL;
 458
 459	/* Random duplication */
 460	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
 461		++count;
 462
 463	/* Drop packet? */
 464	if (loss_event(q)) {
 465		if (q->ecn && INET_ECN_set_ce(skb))
 466			qdisc_qstats_drop(sch); /* mark packet */
 467		else
 468			--count;
 469	}
 470	if (count == 0) {
 471		qdisc_qstats_drop(sch);
 472		__qdisc_drop(skb, to_free);
 473		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 474	}
 475
 476	/* If a delay is expected, orphan the skb. (orphaning usually takes
 477	 * place at TX completion time, so _before_ the link transit delay)
 478	 */
 479	if (q->latency || q->jitter || q->rate)
 480		skb_orphan_partial(skb);
 481
 482	/*
 483	 * If we need to duplicate packet, then re-insert at top of the
 484	 * qdisc tree, since parent queuer expects that only one
 485	 * skb will be queued.
 486	 */
 487	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 488		struct Qdisc *rootq = qdisc_root_bh(sch);
 489		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 490
 491		q->duplicate = 0;
 492		rootq->enqueue(skb2, rootq, to_free);
 493		q->duplicate = dupsave;
 494		rc_drop = NET_XMIT_SUCCESS;
 495	}
 496
 497	/*
 498	 * Randomized packet corruption.
 499	 * Make copy if needed since we are modifying
 500	 * If packet is going to be hardware checksummed, then
 501	 * do it now in software before we mangle it.
 502	 */
 503	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
 504		if (skb_is_gso(skb)) {
 505			skb = netem_segment(skb, sch, to_free);
 506			if (!skb)
 507				return rc_drop;
 
 508			segs = skb->next;
 509			skb_mark_not_on_list(skb);
 510			qdisc_skb_cb(skb)->pkt_len = skb->len;
 511		}
 512
 513		skb = skb_unshare(skb, GFP_ATOMIC);
 514		if (unlikely(!skb)) {
 515			qdisc_qstats_drop(sch);
 516			goto finish_segs;
 517		}
 518		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 519		    skb_checksum_help(skb)) {
 520			qdisc_drop(skb, sch, to_free);
 521			skb = NULL;
 522			goto finish_segs;
 523		}
 524
 525		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
 526			1<<get_random_u32_below(8);
 527	}
 528
 529	if (unlikely(sch->q.qlen >= sch->limit)) {
 530		/* re-link segs, so that qdisc_drop_all() frees them all */
 531		skb->next = segs;
 532		qdisc_drop_all(skb, sch, to_free);
 533		return rc_drop;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 534	}
 535
 536	qdisc_qstats_backlog_inc(sch, skb);
 537
 538	cb = netem_skb_cb(skb);
 539	if (q->gap == 0 ||		/* not doing reordering */
 540	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 541	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
 542		u64 now;
 543		s64 delay;
 544
 545		delay = tabledist(q->latency, q->jitter,
 546				  &q->delay_cor, &q->prng, q->delay_dist);
 547
 548		now = ktime_get_ns();
 549
 550		if (q->rate) {
 551			struct netem_skb_cb *last = NULL;
 552
 553			if (sch->q.tail)
 554				last = netem_skb_cb(sch->q.tail);
 555			if (q->t_root.rb_node) {
 556				struct sk_buff *t_skb;
 557				struct netem_skb_cb *t_last;
 558
 559				t_skb = skb_rb_last(&q->t_root);
 560				t_last = netem_skb_cb(t_skb);
 561				if (!last ||
 562				    t_last->time_to_send > last->time_to_send)
 563					last = t_last;
 564			}
 565			if (q->t_tail) {
 566				struct netem_skb_cb *t_last =
 567					netem_skb_cb(q->t_tail);
 568
 569				if (!last ||
 570				    t_last->time_to_send > last->time_to_send)
 571					last = t_last;
 572			}
 573
 574			if (last) {
 575				/*
 576				 * Last packet in queue is reference point (now),
 577				 * calculate this time bonus and subtract
 578				 * from delay.
 579				 */
 580				delay -= last->time_to_send - now;
 581				delay = max_t(s64, 0, delay);
 582				now = last->time_to_send;
 583			}
 584
 585			delay += packet_time_ns(qdisc_pkt_len(skb), q);
 586		}
 587
 588		cb->time_to_send = now + delay;
 589		++q->counter;
 590		tfifo_enqueue(skb, sch);
 591	} else {
 592		/*
 593		 * Do re-ordering by putting one out of N packets at the front
 594		 * of the queue.
 595		 */
 596		cb->time_to_send = ktime_get_ns();
 597		q->counter = 0;
 598
 599		__qdisc_enqueue_head(skb, &sch->q);
 600		sch->qstats.requeues++;
 601	}
 602
 603finish_segs:
 
 
 
 604	if (segs) {
 605		unsigned int len, last_len;
 606		int nb;
 607
 608		len = skb ? skb->len : 0;
 609		nb = skb ? 1 : 0;
 610
 611		while (segs) {
 612			skb2 = segs->next;
 613			skb_mark_not_on_list(segs);
 614			qdisc_skb_cb(segs)->pkt_len = segs->len;
 615			last_len = segs->len;
 616			rc = qdisc_enqueue(segs, sch, to_free);
 617			if (rc != NET_XMIT_SUCCESS) {
 618				if (net_xmit_drop_count(rc))
 619					qdisc_qstats_drop(sch);
 620			} else {
 621				nb++;
 622				len += last_len;
 623			}
 624			segs = skb2;
 625		}
 626		/* Parent qdiscs accounted for 1 skb of size @prev_len */
 627		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
 628	} else if (!skb) {
 629		return NET_XMIT_DROP;
 630	}
 631	return NET_XMIT_SUCCESS;
 632}
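
/* A worked example of the rate pacing above (a sketch, assuming
 * packet_time_ns() converts length to wire time roughly as
 * len * NSEC_PER_SEC / q->rate before the overhead/cell adjustments
 * configured via TCA_NETEM_RATE): at q->rate = 125000 bytes/s
 * (1 Mbit/s), a 1500-byte packet adds 1500 * 1e9 / 125000 =
 * 12,000,000 ns, so back-to-back enqueues are scheduled 12 ms apart,
 * each measured from the time_to_send of the previous tail packet.
 */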
 633
 634/* Delay the next round with a new future slot: pick the next
 635 * slot start time and refresh the per-slot byte and packet budgets.
 636 */
 637
 638static void get_slot_next(struct netem_sched_data *q, u64 now)
 639{
 640	s64 next_delay;
 641
 642	if (!q->slot_dist)
 643		next_delay = q->slot_config.min_delay +
 644				(get_random_u32() *
 645				 (q->slot_config.max_delay -
 646				  q->slot_config.min_delay) >> 32);
 647	else
 648		next_delay = tabledist(q->slot_config.dist_delay,
 649				       (s32)(q->slot_config.dist_jitter),
 650				       NULL, &q->prng, q->slot_dist);
 651
 652	q->slot.slot_next = now + next_delay;
 653	q->slot.packets_left = q->slot_config.max_packets;
 654	q->slot.bytes_left = q->slot_config.max_bytes;
 655}
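
/* A minimal sketch (hypothetical helper, not used by this file) of the
 * multiply-shift in the branch above: a u32 random value scaled by
 * (rnd * range) >> 32 lands uniformly in [0, range), avoiding a modulo.
 */
static inline s64 netem_uniform_delay_sketch(s64 min_delay, s64 max_delay,
					     u32 rnd)
{
	s64 range = max_delay - min_delay;

	/* rnd / 2^32 is uniform in [0, 1), so (rnd * range) >> 32 is
	 * uniform in [0, range); adding min_delay shifts the interval.
	 */
	return min_delay + ((rnd * range) >> 32);
}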
 656
 657static struct sk_buff *netem_peek(struct netem_sched_data *q)
 658{
 659	struct sk_buff *skb = skb_rb_first(&q->t_root);
 660	u64 t1, t2;
 661
 662	if (!skb)
 663		return q->t_head;
 664	if (!q->t_head)
 665		return skb;
 666
 667	t1 = netem_skb_cb(skb)->time_to_send;
 668	t2 = netem_skb_cb(q->t_head)->time_to_send;
 669	if (t1 < t2)
 670		return skb;
 671	return q->t_head;
 672}
 673
 674static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
 675{
 676	if (skb == q->t_head) {
 677		q->t_head = skb->next;
 678		if (!q->t_head)
 679			q->t_tail = NULL;
 680	} else {
 681		rb_erase(&skb->rbnode, &q->t_root);
 682	}
 683}
 684
 685static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 686{
 687	struct netem_sched_data *q = qdisc_priv(sch);
 688	struct sk_buff *skb;
 689
 690tfifo_dequeue:
 691	skb = __qdisc_dequeue_head(&sch->q);
 692	if (skb) {
 693		qdisc_qstats_backlog_dec(sch, skb);
 694deliver:
 695		qdisc_bstats_update(sch, skb);
 696		return skb;
 697	}
 698	skb = netem_peek(q);
 699	if (skb) {
 700		u64 time_to_send;
 701		u64 now = ktime_get_ns();
 702
 703		/* is there still time remaining before the head may be sent? */
 704		time_to_send = netem_skb_cb(skb)->time_to_send;
 705		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
 706			get_slot_next(q, now);
 707
 708		if (time_to_send <= now && q->slot.slot_next <= now) {
 709			netem_erase_head(q, skb);
 710			sch->q.qlen--;
 711			qdisc_qstats_backlog_dec(sch, skb);
 712			skb->next = NULL;
 713			skb->prev = NULL;
 714			/* skb->dev shares skb->rbnode area,
 715			 * so we need to restore its value.
 716			 */
 717			skb->dev = qdisc_dev(sch);
 718
 719			if (q->slot.slot_next) {
 720				q->slot.packets_left--;
 721				q->slot.bytes_left -= qdisc_pkt_len(skb);
 722				if (q->slot.packets_left <= 0 ||
 723				    q->slot.bytes_left <= 0)
 724					get_slot_next(q, now);
 725			}
 726
 727			if (q->qdisc) {
 728				unsigned int pkt_len = qdisc_pkt_len(skb);
 729				struct sk_buff *to_free = NULL;
 730				int err;
 731
 732				err = qdisc_enqueue(skb, q->qdisc, &to_free);
 733				kfree_skb_list(to_free);
 734				if (err != NET_XMIT_SUCCESS &&
 735				    net_xmit_drop_count(err)) {
 736					qdisc_qstats_drop(sch);
 737					qdisc_tree_reduce_backlog(sch, 1,
 738								  pkt_len);
 739				}
 740				goto tfifo_dequeue;
 741			}
 742			goto deliver;
 743		}
 744
 745		if (q->qdisc) {
 746			skb = q->qdisc->ops->dequeue(q->qdisc);
 747			if (skb)
 748				goto deliver;
 749		}
 750
 751		qdisc_watchdog_schedule_ns(&q->watchdog,
 752					   max(time_to_send,
 753					       q->slot.slot_next));
 754	}
 755
 756	if (q->qdisc) {
 757		skb = q->qdisc->ops->dequeue(q->qdisc);
 758		if (skb)
 759			goto deliver;
 760	}
 761	return NULL;
 762}
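
/* Dequeue order, as implemented above: first any packet already moved
 * to sch->q, then the tfifo head once both its time_to_send and the
 * current slot allow it (detouring through the child qdisc when one is
 * grafted), and finally the child qdisc itself.  If the head is still
 * in the future, the watchdog fires at max(time_to_send, slot_next) so
 * the dequeue is retried on time.
 */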
 763
 764static void netem_reset(struct Qdisc *sch)
 765{
 766	struct netem_sched_data *q = qdisc_priv(sch);
 767
 768	qdisc_reset_queue(sch);
 769	tfifo_reset(sch);
 770	if (q->qdisc)
 771		qdisc_reset(q->qdisc);
 772	qdisc_watchdog_cancel(&q->watchdog);
 773}
 774
 775static void dist_free(struct disttable *d)
 776{
 777	kvfree(d);
 778}
 779
 780/*
 781 * Distribution data is a variable-size payload containing
 782 * signed 16-bit values.
 783 */
 784
 785static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
 786{
 787	size_t n = nla_len(attr)/sizeof(__s16);
 788	const __s16 *data = nla_data(attr);
 789	struct disttable *d;
 790	int i;
 791
 792	if (!n || n > NETEM_DIST_MAX)
 793		return -EINVAL;
 794
 795	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
 796	if (!d)
 797		return -ENOMEM;
 798
 799	d->size = n;
 800	for (i = 0; i < n; i++)
 801		d->table[i] = data[i];
 802
 803	*tbl = d;
 804	return 0;
 805}
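
/* A sketch of how the table is consumed (tabledist() lives earlier in
 * this file; the arithmetic here is an approximation): each entry is a
 * scaled deviate t, and the drawn delay is roughly
 * mu + sigma * t / NETEM_DIST_SCALE.  A hypothetical helper showing
 * only the selection step:
 */
static inline s16 dist_pick_sketch(const struct disttable *d, u32 rnd)
{
	/* uniform pick of one signed scaled deviate */
	return d->table[rnd % d->size];
}

/* Userspace loads such tables via tc, e.g. (distribution files ship
 * with iproute2; the install path varies by build):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */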
 806
 807static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
 808{
 809	const struct tc_netem_slot *c = nla_data(attr);
 810
 811	q->slot_config = *c;
 812	if (q->slot_config.max_packets == 0)
 813		q->slot_config.max_packets = INT_MAX;
 814	if (q->slot_config.max_bytes == 0)
 815		q->slot_config.max_bytes = INT_MAX;
 816
 817	/* cap dist_jitter to the range accepted by tabledist() */
 818	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
 819
 820	q->slot.packets_left = q->slot_config.max_packets;
 821	q->slot.bytes_left = q->slot_config.max_bytes;
 822	if (q->slot_config.min_delay | q->slot_config.max_delay |
 823	    q->slot_config.dist_jitter)
 824		q->slot.slot_next = ktime_get_ns();
 825	else
 826		q->slot.slot_next = 0;
 827}
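
/* Slots model bursty links (e.g. WiFi/LTE frame aggregation): packets
 * wait for the next slot to open, then drain until the max_packets or
 * max_bytes budget set above is spent.  A hedged tc example (option
 * names per tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem slot 800us 1ms packets 32 bytes 64k
 */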
 828
 829static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 830{
 831	const struct tc_netem_corr *c = nla_data(attr);
 832
 833	init_crandom(&q->delay_cor, c->delay_corr);
 834	init_crandom(&q->loss_cor, c->loss_corr);
 835	init_crandom(&q->dup_cor, c->dup_corr);
 836}
 837
 838static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
 839{
 840	const struct tc_netem_reorder *r = nla_data(attr);
 841
 842	q->reorder = r->probability;
 843	init_crandom(&q->reorder_cor, r->correlation);
 844}
 845
 846static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
 847{
 848	const struct tc_netem_corrupt *r = nla_data(attr);
 849
 850	q->corrupt = r->probability;
 851	init_crandom(&q->corrupt_cor, r->correlation);
 852}
 853
 854static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
 855{
 856	const struct tc_netem_rate *r = nla_data(attr);
 857
 858	q->rate = r->rate;
 859	q->packet_overhead = r->packet_overhead;
 860	q->cell_size = r->cell_size;
 861	q->cell_overhead = r->cell_overhead;
 862	if (q->cell_size)
 863		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 864	else
 865		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
 866}
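
/* Worked example of the cell parameters above, using ATM-like framing
 * as an illustration: with cell_size = 48 and cell_overhead = 5, a
 * 1500-byte packet needs ceil(1500 / 48) = 32 cells, i.e.
 * 32 * (48 + 5) = 1696 bytes of effective wire time.  The reciprocal
 * value precomputed here lets the transmit-time path replace that
 * per-packet division with a multiply and shift.
 */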
 867
 868static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
 869{
 870	const struct nlattr *la;
 871	int rem;
 872
 873	nla_for_each_nested(la, attr, rem) {
 874		u16 type = nla_type(la);
 875
 876		switch (type) {
 877		case NETEM_LOSS_GI: {
 878			const struct tc_netem_gimodel *gi = nla_data(la);
 879
 880			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 881				pr_info("netem: incorrect gi model size\n");
 882				return -EINVAL;
 883			}
 884
 885			q->loss_model = CLG_4_STATES;
 886
 887			q->clg.state = TX_IN_GAP_PERIOD;
 888			q->clg.a1 = gi->p13;
 889			q->clg.a2 = gi->p31;
 890			q->clg.a3 = gi->p32;
 891			q->clg.a4 = gi->p14;
 892			q->clg.a5 = gi->p23;
 893			break;
 894		}
 895
 896		case NETEM_LOSS_GE: {
 897			const struct tc_netem_gemodel *ge = nla_data(la);
 898
 899			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 900				pr_info("netem: incorrect ge model size\n");
 901				return -EINVAL;
 902			}
 903
 904			q->loss_model = CLG_GILB_ELL;
 905			q->clg.state = GOOD_STATE;
 906			q->clg.a1 = ge->p;
 907			q->clg.a2 = ge->r;
 908			q->clg.a3 = ge->h;
 909			q->clg.a4 = ge->k1;
 910			break;
 911		}
 912
 913		default:
 914			pr_info("netem: unknown loss type %u\n", type);
 915			return -EINVAL;
 916		}
 917	}
 918
 919	return 0;
 920}
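
/* Parameter meanings for the two loss models stored above, as used by
 * the loss generators earlier in this file: for the 4-state model,
 * p13/p31/p32/p14/p23 are Markov transition probabilities; for
 * Gilbert-Elliott, p and r are the good->bad and bad->good transition
 * probabilities, 1-h the loss probability in the bad state, and k1 the
 * loss probability in the good state.  A hedged tc example (argument
 * order per tc-netem(8): p, r, 1-h, 1-k):
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 */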
 921
 922static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 923	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 924	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 925	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 926	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 927	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 928	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 929	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 930	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
 931	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
 932	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
 933	[TCA_NETEM_PRNG_SEED]	= { .type = NLA_U64 },
 934};
 935
 936static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 937		      const struct nla_policy *policy, int len)
 938{
 939	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 940
 941	if (nested_len < 0) {
 942		pr_info("netem: invalid attributes len %d\n", nested_len);
 943		return -EINVAL;
 944	}
 945
 946	if (nested_len >= nla_attr_size(0))
 947		return nla_parse_deprecated(tb, maxtype,
 948					    nla_data(nla) + NLA_ALIGN(len),
 949					    nested_len, policy, NULL);
 950
 951	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 952	return 0;
 953}
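
/* The legacy TCA_OPTIONS layout parse_attr() copes with: a fixed
 * struct tc_netem_qopt header followed by optional netlink attributes,
 * which is why parsing starts at nla_data(nla) + NLA_ALIGN(len):
 *
 *	+------------------------------+  offset 0
 *	| struct tc_netem_qopt         |  fixed legacy config
 *	+------------------------------+  NLA_ALIGN(sizeof(qopt))
 *	| nlattr TCA_NETEM_CORR, ...   |  optional attributes
 *	+------------------------------+
 */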
 954
 955/* Parse netlink message to set options */
 956static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 957			struct netlink_ext_ack *extack)
 958{
 959	struct netem_sched_data *q = qdisc_priv(sch);
 960	struct nlattr *tb[TCA_NETEM_MAX + 1];
 961	struct disttable *delay_dist = NULL;
 962	struct disttable *slot_dist = NULL;
 963	struct tc_netem_qopt *qopt;
 964	struct clgstate old_clg;
 965	int old_loss_model = CLG_RANDOM;
 966	int ret;
 967
 968	qopt = nla_data(opt);
 969	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 970	if (ret < 0)
 971		return ret;
 972
 973	if (tb[TCA_NETEM_DELAY_DIST]) {
 974		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
 975		if (ret)
 976			goto table_free;
 977	}
 978
 979	if (tb[TCA_NETEM_SLOT_DIST]) {
 980		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
 981		if (ret)
 982			goto table_free;
 983	}
 984
 985	sch_tree_lock(sch);
 986	/* backup q->clg and q->loss_model */
 987	old_clg = q->clg;
 988	old_loss_model = q->loss_model;
 989
 990	if (tb[TCA_NETEM_LOSS]) {
 991		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
 992		if (ret) {
 993			q->loss_model = old_loss_model;
 994			q->clg = old_clg;
 995			goto unlock;
 996		}
 997	} else {
 998		q->loss_model = CLG_RANDOM;
 999	}
1000
1001	if (delay_dist)
1002		swap(q->delay_dist, delay_dist);
1003	if (slot_dist)
1004		swap(q->slot_dist, slot_dist);
1005	sch->limit = qopt->limit;
1006
1007	q->latency = PSCHED_TICKS2NS(qopt->latency);
1008	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
1009	q->limit = qopt->limit;
1010	q->gap = qopt->gap;
1011	q->counter = 0;
1012	q->loss = qopt->loss;
1013	q->duplicate = qopt->duplicate;
1014
1015	/* For compatibility with earlier versions:
1016	 * if gap is set, assume 100% reorder probability.
1017	 */
1018	if (q->gap)
1019		q->reorder = ~0;
1020
1021	if (tb[TCA_NETEM_CORR])
1022		get_correlation(q, tb[TCA_NETEM_CORR]);
1023
1024	if (tb[TCA_NETEM_REORDER])
1025		get_reorder(q, tb[TCA_NETEM_REORDER]);
1026
1027	if (tb[TCA_NETEM_CORRUPT])
1028		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
1029
1030	if (tb[TCA_NETEM_RATE])
1031		get_rate(q, tb[TCA_NETEM_RATE]);
1032
1033	if (tb[TCA_NETEM_RATE64])
1034		q->rate = max_t(u64, q->rate,
1035				nla_get_u64(tb[TCA_NETEM_RATE64]));
1036
1037	if (tb[TCA_NETEM_LATENCY64])
1038		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
1039
1040	if (tb[TCA_NETEM_JITTER64])
1041		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
1042
1043	if (tb[TCA_NETEM_ECN])
1044		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
1045
1046	if (tb[TCA_NETEM_SLOT])
1047		get_slot(q, tb[TCA_NETEM_SLOT]);
1048
1049	/* cap jitter to the range accepted by tabledist() */
1050	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
1051
1052	if (tb[TCA_NETEM_PRNG_SEED])
1053		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
1054	else
1055		q->prng.seed = get_random_u64();
1056	prandom_seed_state(&q->prng.prng_state, q->prng.seed);
1057
1058unlock:
1059	sch_tree_unlock(sch);
1060
1061table_free:
1062	dist_free(delay_dist);
1063	dist_free(slot_dist);
1064	return ret;
1065}
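
/* Hedged usage examples exercising attributes parsed above (syntax per
 * tc-netem(8); the seed keyword requires an iproute2 recent enough to
 * know TCA_NETEM_PRNG_SEED):
 *
 *	# 100ms +/- 10ms delay with 25% correlation, 0.3% random loss
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 *	# reproducible emulation runs via a fixed PRNG seed
 *	tc qdisc change dev eth0 root netem delay 100ms 10ms seed 42
 */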
1066
1067static int netem_init(struct Qdisc *sch, struct nlattr *opt,
1068		      struct netlink_ext_ack *extack)
1069{
1070	struct netem_sched_data *q = qdisc_priv(sch);
1071	int ret;
1072
1073	qdisc_watchdog_init(&q->watchdog, sch);
1074
1075	if (!opt)
1076		return -EINVAL;
1077
1078	q->loss_model = CLG_RANDOM;
1079	ret = netem_change(sch, opt, extack);
1080	if (ret)
1081		pr_info("netem: change failed\n");
1082	return ret;
1083}
1084
1085static void netem_destroy(struct Qdisc *sch)
1086{
1087	struct netem_sched_data *q = qdisc_priv(sch);
1088
1089	qdisc_watchdog_cancel(&q->watchdog);
1090	if (q->qdisc)
1091		qdisc_put(q->qdisc);
1092	dist_free(q->delay_dist);
1093	dist_free(q->slot_dist);
1094}
1095
1096static int dump_loss_model(const struct netem_sched_data *q,
1097			   struct sk_buff *skb)
1098{
1099	struct nlattr *nest;
1100
1101	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
1102	if (nest == NULL)
1103		goto nla_put_failure;
1104
1105	switch (q->loss_model) {
1106	case CLG_RANDOM:
1107		/* legacy loss model */
1108		nla_nest_cancel(skb, nest);
1109		return 0;	/* no data */
1110
1111	case CLG_4_STATES: {
1112		struct tc_netem_gimodel gi = {
1113			.p13 = q->clg.a1,
1114			.p31 = q->clg.a2,
1115			.p32 = q->clg.a3,
1116			.p14 = q->clg.a4,
1117			.p23 = q->clg.a5,
1118		};
1119
1120		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
1121			goto nla_put_failure;
1122		break;
1123	}
1124	case CLG_GILB_ELL: {
1125		struct tc_netem_gemodel ge = {
1126			.p = q->clg.a1,
1127			.r = q->clg.a2,
1128			.h = q->clg.a3,
1129			.k1 = q->clg.a4,
1130		};
1131
1132		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
1133			goto nla_put_failure;
1134		break;
1135	}
1136	}
1137
1138	nla_nest_end(skb, nest);
1139	return 0;
1140
1141nla_put_failure:
1142	nla_nest_cancel(skb, nest);
1143	return -1;
1144}
1145
1146static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
1147{
1148	const struct netem_sched_data *q = qdisc_priv(sch);
1149	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
1150	struct tc_netem_qopt qopt;
1151	struct tc_netem_corr cor;
1152	struct tc_netem_reorder reorder;
1153	struct tc_netem_corrupt corrupt;
1154	struct tc_netem_rate rate;
1155	struct tc_netem_slot slot;
1156
1157	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
1158			     UINT_MAX);
1159	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
1160			    UINT_MAX);
1161	qopt.limit = q->limit;
1162	qopt.loss = q->loss;
1163	qopt.gap = q->gap;
1164	qopt.duplicate = q->duplicate;
1165	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1166		goto nla_put_failure;
1167
1168	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
1169		goto nla_put_failure;
1170
1171	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
1172		goto nla_put_failure;
1173
1174	cor.delay_corr = q->delay_cor.rho;
1175	cor.loss_corr = q->loss_cor.rho;
1176	cor.dup_corr = q->dup_cor.rho;
1177	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1178		goto nla_put_failure;
1179
1180	reorder.probability = q->reorder;
1181	reorder.correlation = q->reorder_cor.rho;
1182	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1183		goto nla_put_failure;
1184
1185	corrupt.probability = q->corrupt;
1186	corrupt.correlation = q->corrupt_cor.rho;
1187	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1188		goto nla_put_failure;
1189
1190	if (q->rate >= (1ULL << 32)) {
1191		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
1192				      TCA_NETEM_PAD))
1193			goto nla_put_failure;
1194		rate.rate = ~0U;
1195	} else {
1196		rate.rate = q->rate;
1197	}
1198	rate.packet_overhead = q->packet_overhead;
1199	rate.cell_size = q->cell_size;
1200	rate.cell_overhead = q->cell_overhead;
1201	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1202		goto nla_put_failure;
1203
1204	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1205		goto nla_put_failure;
1206
1207	if (dump_loss_model(q, skb) != 0)
1208		goto nla_put_failure;
1209
1210	if (q->slot_config.min_delay | q->slot_config.max_delay |
1211	    q->slot_config.dist_jitter) {
1212		slot = q->slot_config;
1213		if (slot.max_packets == INT_MAX)
1214			slot.max_packets = 0;
1215		if (slot.max_bytes == INT_MAX)
1216			slot.max_bytes = 0;
1217		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
1218			goto nla_put_failure;
1219	}
1220
1221	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
1222			      TCA_NETEM_PAD))
1223		goto nla_put_failure;
1224
1225	return nla_nest_end(skb, nla);
1226
1227nla_put_failure:
1228	nlmsg_trim(skb, nla);
1229	return -1;
1230}
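
/* Worked example of the rate split above: 40 Gbit/s is 5,000,000,000
 * bytes/s, which exceeds the 32-bit legacy field (max 4,294,967,295),
 * so the exact value travels in TCA_NETEM_RATE64 while old tools read
 * a saturated ~0U from the legacy attribute.
 */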
1231
1232static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
1233			  struct sk_buff *skb, struct tcmsg *tcm)
1234{
1235	struct netem_sched_data *q = qdisc_priv(sch);
1236
1237	if (cl != 1 || !q->qdisc) 	/* only one class */
1238		return -ENOENT;
1239
1240	tcm->tcm_handle |= TC_H_MIN(1);
1241	tcm->tcm_info = q->qdisc->handle;
1242
1243	return 0;
1244}
1245
1246static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1247		     struct Qdisc **old, struct netlink_ext_ack *extack)
1248{
1249	struct netem_sched_data *q = qdisc_priv(sch);
1250
1251	*old = qdisc_replace(sch, new, &q->qdisc);
1252	return 0;
1253}
1254
1255static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
1256{
1257	struct netem_sched_data *q = qdisc_priv(sch);
1258	return q->qdisc;
1259}
1260
1261static unsigned long netem_find(struct Qdisc *sch, u32 classid)
1262{
1263	return 1;
1264}
1265
1266static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
1267{
1268	if (!walker->stop) {
1269		if (!tc_qdisc_stats_dump(sch, 1, walker))
1270			return;
1271	}
1272}
1273
1274static const struct Qdisc_class_ops netem_class_ops = {
1275	.graft		=	netem_graft,
1276	.leaf		=	netem_leaf,
1277	.find		=	netem_find,
1278	.walk		=	netem_walk,
1279	.dump		=	netem_dump_class,
1280};
1281
1282static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1283	.id		=	"netem",
1284	.cl_ops		=	&netem_class_ops,
1285	.priv_size	=	sizeof(struct netem_sched_data),
1286	.enqueue	=	netem_enqueue,
1287	.dequeue	=	netem_dequeue,
1288	.peek		=	qdisc_peek_dequeued,
1289	.init		=	netem_init,
1290	.reset		=	netem_reset,
1291	.destroy	=	netem_destroy,
1292	.change		=	netem_change,
1293	.dump		=	netem_dump,
1294	.owner		=	THIS_MODULE,
1295};
1296MODULE_ALIAS_NET_SCH("netem");
1297
1298
1299static int __init netem_module_init(void)
1300{
1301	pr_info("netem: version " VERSION "\n");
1302	return register_qdisc(&netem_qdisc_ops);
1303}
1304static void __exit netem_module_exit(void)
1305{
1306	unregister_qdisc(&netem_qdisc_ops);
1307}
1308module_init(netem_module_init)
1309module_exit(netem_module_exit)
1310MODULE_LICENSE("GPL");
1311MODULE_DESCRIPTION("Network characteristics emulator qdisc");