v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to a 4-state Markov
	chain (the "GI" model) and the "Gilbert-Elliot" model, a 2-state
	special case of it.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
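
/* Example configuration from user space via tc(8); a minimal sketch, with
 * the interface name (eth0) and the numbers chosen purely for illustration:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *		(100ms base delay, +/-10ms jitter, 25% correlation)
 *	tc qdisc change dev eth0 root netem loss 0.1% duplicate 1%
 *		(additionally, 0.1% random loss and 1% duplication)
 */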

struct disttable {
	u32  size;
	s16 table[];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64		time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return get_random_u32();

	value = get_random_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
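
/* A worked reading of the blend above: taking R = rho / 2^32 as the
 * correlation coefficient, the shift-by-32 line computes, in fixed point,
 *
 *	answer ~= (1 - R) * value + R * last
 *
 * so rho == 0 yields independent uniform numbers, while rho near ~0U makes
 * each number closely track the previous one.
 */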

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = get_random_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between a random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (get_random_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (get_random_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (get_random_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (get_random_u32() > clg->a3)
			return true;
	}

	return false;
}
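
/* Reading the GE generator above against struct clgstate: a1 = p (the
 * good-to-bad transition probability), a2 = r (bad-to-good), a4 = 1-k (loss
 * probability while in the good state), and a3 = h, so "rnd > a3" loses a
 * packet in the bad state with probability 1-h.  All parameters are scaled
 * to the full u32 range.
 */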

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
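
/* The final lines of tabledist() compute mu + (sigma * t) / NETEM_DIST_SCALE,
 * with sigma split into its quotient and remainder modulo NETEM_DIST_SCALE:
 * the quotient term needs no division, and the remainder term is rounded to
 * the nearest integer by the +/- NETEM_DIST_SCALE/2 adjustment.  A table
 * entry t is therefore a deviation from the mean expressed in units of
 * sigma / NETEM_DIST_SCALE.
 */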

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
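
/* A worked example for packet_time_ns(), with illustrative numbers: at
 * q->rate = 125000 bytes/sec (1 Mbit/s), with no overheads and no cell
 * framing, a 1500 byte packet takes 1500 * NSEC_PER_SEC / 125000 =
 * 12,000,000 ns, i.e. 12 ms of serialization delay.
 */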

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
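
/* Design note on the two structures used above: a packet due no earlier than
 * the current tail (the common case when jitter is zero or small) is appended
 * to the t_head/t_tail linked list in O(1); only packets that must be
 * delivered out of order go into the t_root rbtree.  netem_peek() further
 * down merges the two by comparing their earliest time_to_send values.
 */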

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we segment it instead, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate the packet, then re-insert at top of the
	 * qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				return rc_drop;
			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
			1<<get_random_u32_below(8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}

/* Start a new slot: pick the next slot's future start time and reset its
 * packet and byte budgets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(get_random_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
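
/* In the uniform branch above, multiplying the 32-bit random value by the
 * delay range and shifting right by 32 picks next_delay from
 * [min_delay, max_delay) with a multiply-and-shift instead of a modulo.
 */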

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
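
/* Where the tables come from (background, not part of this file): the delay
 * and slot distributions are generated in user space; iproute2 ships normal,
 * pareto and paretonormal tables plus a maketable tool for experimental
 * ones, and tc passes the chosen table down as the TCA_NETEM_DELAY_DIST or
 * TCA_NETEM_SLOT_DIST attribute handled here.  Each entry is a deviation in
 * units of sigma / NETEM_DIST_SCALE, as consumed by tabledist() above.
 */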

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
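
/* Netem options arrive as a legacy struct tc_netem_qopt immediately followed
 * by optional netlink attributes; parse_attr() above therefore skips
 * NLA_ALIGN(sizeof(struct tc_netem_qopt)) bytes of the TCA_OPTIONS payload
 * before parsing the attribute tail.
 */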

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case
	 * q->clg and q->loss_model were modified
	 * in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
v4.6
 
   1/*
   2 * net/sched/sch_netem.c	Network emulator
   3 *
   4 * 		This program is free software; you can redistribute it and/or
   5 * 		modify it under the terms of the GNU General Public License
   6 * 		as published by the Free Software Foundation; either version
   7 * 		2 of the License.
   8 *
   9 *  		Many of the algorithms and ideas for this came from
  10 *		NIST Net which is not copyrighted.
  11 *
  12 * Authors:	Stephen Hemminger <shemminger@osdl.org>
  13 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/module.h>
  18#include <linux/slab.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/rtnetlink.h>
  25#include <linux/reciprocal_div.h>
  26#include <linux/rbtree.h>
  27
  28#include <net/netlink.h>
  29#include <net/pkt_sched.h>
  30#include <net/inet_ecn.h>
  31
  32#define VERSION "1.3"
  33
  34/*	Network Emulation Queuing algorithm.
  35	====================================
  36
  37	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
  38		 Network Emulation Tool
  39		 [2] Luigi Rizzo, DummyNet for FreeBSD
  40
  41	 ----------------------------------------------------------------
  42
  43	 This started out as a simple way to delay outgoing packets to
  44	 test TCP but has grown to include most of the functionality
  45	 of a full blown network emulator like NISTnet. It can delay
  46	 packets and add random jitter (and correlation). The random
  47	 distribution can be loaded from a table as well to provide
  48	 normal, Pareto, or experimental curves. Packet loss,
  49	 duplication, and reordering can also be emulated.
  50
  51	 This qdisc does not do classification that can be handled in
  52	 layering other disciplines.  It does not need to do bandwidth
  53	 control either since that can be handled by using token
  54	 bucket or other rate control.
  55
  56     Correlated Loss Generator models
  57
  58	Added generation of correlated loss according to the
  59	"Gilbert-Elliot" model, a 4-state markov model.
  60
  61	References:
  62	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  63	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  64	and intuitive loss model for packet networks and its implementation
  65	in the Netem module in the Linux kernel", available in [1]
  66
  67	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  68		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  69*/
  70
 
 
 
 
 
  71struct netem_sched_data {
  72	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
  73	struct rb_root t_root;
  74
 
 
 
 
  75	/* optional qdisc for classful handling (NULL at netem init) */
  76	struct Qdisc	*qdisc;
  77
  78	struct qdisc_watchdog watchdog;
  79
  80	psched_tdiff_t latency;
  81	psched_tdiff_t jitter;
  82
  83	u32 loss;
  84	u32 ecn;
  85	u32 limit;
  86	u32 counter;
  87	u32 gap;
  88	u32 duplicate;
  89	u32 reorder;
  90	u32 corrupt;
  91	u64 rate;
  92	s32 packet_overhead;
  93	u32 cell_size;
  94	struct reciprocal_value cell_size_reciprocal;
  95	s32 cell_overhead;
  96
  97	struct crndstate {
  98		u32 last;
  99		u32 rho;
 100	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 101
 102	struct disttable {
 103		u32  size;
 104		s16 table[0];
 105	} *delay_dist;
 106
 107	enum  {
 108		CLG_RANDOM,
 109		CLG_4_STATES,
 110		CLG_GILB_ELL,
 111	} loss_model;
 112
 113	enum {
 114		TX_IN_GAP_PERIOD = 1,
 115		TX_IN_BURST_PERIOD,
 116		LOST_IN_GAP_PERIOD,
 117		LOST_IN_BURST_PERIOD,
 118	} _4_state_model;
 119
 120	enum {
 121		GOOD_STATE = 1,
 122		BAD_STATE,
 123	} GE_state_model;
 124
 125	/* Correlated Loss Generation models */
 126	struct clgstate {
 127		/* state of the Markov chain */
 128		u8 state;
 129
 130		/* 4-states and Gilbert-Elliot models */
 131		u32 a1;	/* p13 for 4-states or p for GE */
 132		u32 a2;	/* p31 for 4-states or r for GE */
 133		u32 a3;	/* p32 for 4-states or h for GE */
 134		u32 a4;	/* p14 for 4-states or 1-k for GE */
 135		u32 a5; /* p23 used only in 4-states */
 136	} clg;
 137
 
 
 
 
 
 
 
 
 138};
 139
 140/* Time stamp put into socket buffer control block
 141 * Only valid when skbs are in our internal t(ime)fifo queue.
 142 *
 143 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
 144 * and skb->next & skb->prev are scratch space for a qdisc,
 145 * we save skb->tstamp value in skb->cb[] before destroying it.
 146 */
 147struct netem_skb_cb {
 148	psched_time_t	time_to_send;
 149	ktime_t		tstamp_save;
 150};
 151
 152
 153static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
 154{
 155	return container_of(rb, struct sk_buff, rbnode);
 156}
 157
 158static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 159{
 160	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
 161	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 162	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 163}
 164
 165/* init_crandom - initialize correlated random number generator
 166 * Use entropy source for initial seed.
 167 */
 168static void init_crandom(struct crndstate *state, unsigned long rho)
 169{
 170	state->rho = rho;
 171	state->last = prandom_u32();
 172}
 173
 174/* get_crandom - correlated random number generator
 175 * Next number depends on last value.
 176 * rho is scaled to avoid floating point.
 177 */
 178static u32 get_crandom(struct crndstate *state)
 179{
 180	u64 value, rho;
 181	unsigned long answer;
 182
 183	if (state->rho == 0)	/* no correlation */
 184		return prandom_u32();
 185
 186	value = prandom_u32();
 187	rho = (u64)state->rho + 1;
 188	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 189	state->last = answer;
 190	return answer;
 191}
 192
 193/* loss_4state - 4-state model loss generator
 194 * Generates losses according to the 4-state Markov chain adopted in
 195 * the GI (General and Intuitive) loss model.
 196 */
 197static bool loss_4state(struct netem_sched_data *q)
 198{
 199	struct clgstate *clg = &q->clg;
 200	u32 rnd = prandom_u32();
 201
 202	/*
 203	 * Makes a comparison between rnd and the transition
 204	 * probabilities outgoing from the current state, then decides the
 205	 * next state and if the next packet has to be transmitted or lost.
 206	 * The four states correspond to:
 207	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
 208	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
 209	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
 210	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period
 211	 */
 212	switch (clg->state) {
 213	case TX_IN_GAP_PERIOD:
 214		if (rnd < clg->a4) {
 215			clg->state = LOST_IN_BURST_PERIOD;
 216			return true;
 217		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 218			clg->state = LOST_IN_GAP_PERIOD;
 219			return true;
 220		} else if (clg->a1 + clg->a4 < rnd) {
 221			clg->state = TX_IN_GAP_PERIOD;
 222		}
 223
 224		break;
 225	case TX_IN_BURST_PERIOD:
 226		if (rnd < clg->a5) {
 227			clg->state = LOST_IN_GAP_PERIOD;
 228			return true;
 229		} else {
 230			clg->state = TX_IN_BURST_PERIOD;
 231		}
 232
 233		break;
 234	case LOST_IN_GAP_PERIOD:
 235		if (rnd < clg->a3)
 236			clg->state = TX_IN_BURST_PERIOD;
 237		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 238			clg->state = TX_IN_GAP_PERIOD;
 239		} else if (clg->a2 + clg->a3 < rnd) {
 240			clg->state = LOST_IN_GAP_PERIOD;
 241			return true;
 242		}
 243		break;
 244	case LOST_IN_BURST_PERIOD:
 245		clg->state = TX_IN_GAP_PERIOD;
 246		break;
 247	}
 248
 249	return false;
 250}
 251
 252/* loss_gilb_ell - Gilbert-Elliot model loss generator
 253 * Generates losses according to the Gilbert-Elliot loss model or
 254 * its special cases  (Gilbert or Simple Gilbert)
 255 *
 256 * Makes a comparison between random number and the transition
 257 * probabilities outgoing from the current state, then decides the
 258 * next state. A second random number is extracted and the comparison
 259 * with the loss probability of the current state decides if the next
 260 * packet will be transmitted or lost.
 261 */
 262static bool loss_gilb_ell(struct netem_sched_data *q)
 263{
 264	struct clgstate *clg = &q->clg;
 265
 266	switch (clg->state) {
 267	case GOOD_STATE:
 268		if (prandom_u32() < clg->a1)
 269			clg->state = BAD_STATE;
 270		if (prandom_u32() < clg->a4)
 271			return true;
 272		break;
 273	case BAD_STATE:
 274		if (prandom_u32() < clg->a2)
 275			clg->state = GOOD_STATE;
 276		if (prandom_u32() > clg->a3)
 277			return true;
 278	}
 279
 280	return false;
 281}
 282
 283static bool loss_event(struct netem_sched_data *q)
 284{
 285	switch (q->loss_model) {
 286	case CLG_RANDOM:
 287		/* Random packet drop 0 => none, ~0 => all */
 288		return q->loss && q->loss >= get_crandom(&q->loss_cor);
 289
 290	case CLG_4_STATES:
 291		/* 4state loss model algorithm (used also for GI model)
 292		* Extracts a value from the markov 4 state loss generator,
 293		* if it is 1 drops a packet and if needed writes the event in
 294		* the kernel logs
 295		*/
 296		return loss_4state(q);
 297
 298	case CLG_GILB_ELL:
 299		/* Gilbert-Elliot loss model algorithm
 300		* Extracts a value from the Gilbert-Elliot loss generator,
 301		* if it is 1 drops a packet and if needed writes the event in
 302		* the kernel logs
 303		*/
 304		return loss_gilb_ell(q);
 305	}
 306
 307	return false;	/* not reached */
 308}
 309
 310
 311/* tabledist - return a pseudo-randomly distributed value with mean mu and
 312 * std deviation sigma.  Uses table lookup to approximate the desired
 313 * distribution, and a uniformly-distributed pseudo-random source.
 314 */
 315static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
 316				struct crndstate *state,
 317				const struct disttable *dist)
 318{
 319	psched_tdiff_t x;
 320	long t;
 321	u32 rnd;
 322
 323	if (sigma == 0)
 324		return mu;
 325
 326	rnd = get_crandom(state);
 327
 328	/* default uniform distribution */
 329	if (dist == NULL)
 330		return (rnd % (2*sigma)) - sigma + mu;
 331
 332	t = dist->table[rnd % dist->size];
 333	x = (sigma % NETEM_DIST_SCALE) * t;
 334	if (x >= 0)
 335		x += NETEM_DIST_SCALE/2;
 336	else
 337		x -= NETEM_DIST_SCALE/2;
 338
 339	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 340}
 341
 342static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 343{
 344	u64 ticks;
 345
 346	len += q->packet_overhead;
 347
 348	if (q->cell_size) {
 349		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 350
 351		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 352			cells++;
 353		len = cells * (q->cell_size + q->cell_overhead);
 354	}
 355
 356	ticks = (u64)len * NSEC_PER_SEC;
 357
 358	do_div(ticks, q->rate);
 359	return PSCHED_NS2TICKS(ticks);
 360}
 361
 362static void tfifo_reset(struct Qdisc *sch)
 363{
 364	struct netem_sched_data *q = qdisc_priv(sch);
 365	struct rb_node *p;
 366
 367	while ((p = rb_first(&q->t_root))) {
 368		struct sk_buff *skb = netem_rb_to_skb(p);
 369
 370		rb_erase(p, &q->t_root);
 371		skb->next = NULL;
 372		skb->prev = NULL;
 373		kfree_skb(skb);
 374	}
 
 
 
 
 375}
 376
 377static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 378{
 379	struct netem_sched_data *q = qdisc_priv(sch);
 380	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
 381	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 382
 383	while (*p) {
 384		struct sk_buff *skb;
 
 
 
 
 
 
 
 
 
 385
 386		parent = *p;
 387		skb = netem_rb_to_skb(parent);
 388		if (tnext >= netem_skb_cb(skb)->time_to_send)
 389			p = &parent->rb_right;
 390		else
 391			p = &parent->rb_left;
 
 
 
 392	}
 393	rb_link_node(&nskb->rbnode, parent, p);
 394	rb_insert_color(&nskb->rbnode, &q->t_root);
 395	sch->q.qlen++;
 396}
 397
 398/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 399 * when we statistically choose to corrupt one, we instead segment it, returning
 400 * the first packet to be corrupted, and re-enqueue the remaining frames
 401 */
 402static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
 
 403{
 404	struct sk_buff *segs;
 405	netdev_features_t features = netif_skb_features(skb);
 406
 407	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 408
 409	if (IS_ERR_OR_NULL(segs)) {
 410		qdisc_reshape_fail(skb, sch);
 411		return NULL;
 412	}
 413	consume_skb(skb);
 414	return segs;
 415}
 416
 417/*
 418 * Insert one skb into qdisc.
 419 * Note: parent depends on return value to account for queue length.
 420 * 	NET_XMIT_DROP: queue length didn't change.
 421 *      NET_XMIT_SUCCESS: one skb was queued.
 422 */
 423static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 424{
 425	struct netem_sched_data *q = qdisc_priv(sch);
 426	/* We don't fill cb now as skb_unshare() may invalidate it */
 427	struct netem_skb_cb *cb;
 428	struct sk_buff *skb2;
 429	struct sk_buff *segs = NULL;
 430	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
 431	int nb = 0;
 432	int count = 1;
 433	int rc = NET_XMIT_SUCCESS;
 
 
 
 
 434
 435	/* Random duplication */
 436	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 437		++count;
 438
 439	/* Drop packet? */
 440	if (loss_event(q)) {
 441		if (q->ecn && INET_ECN_set_ce(skb))
 442			qdisc_qstats_drop(sch); /* mark packet */
 443		else
 444			--count;
 445	}
 446	if (count == 0) {
 447		qdisc_qstats_drop(sch);
 448		kfree_skb(skb);
 449		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 450	}
 451
 452	/* If a delay is expected, orphan the skb. (orphaning usually takes
 453	 * place at TX completion time, so _before_ the link transit delay)
 454	 */
 455	if (q->latency || q->jitter)
 456		skb_orphan_partial(skb);
 457
 458	/*
 459	 * If we need to duplicate packet, then re-insert at top of the
 460	 * qdisc tree, since parent queuer expects that only one
 461	 * skb will be queued.
 462	 */
 463	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 464		struct Qdisc *rootq = qdisc_root(sch);
 465		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 466
 467		q->duplicate = 0;
 468		rootq->enqueue(skb2, rootq);
 469		q->duplicate = dupsave;
 
 470	}
 471
 472	/*
 473	 * Randomized packet corruption.
 474	 * Make copy if needed since we are modifying
 475	 * If packet is going to be hardware checksummed, then
 476	 * do it now in software before we mangle it.
 477	 */
 478	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 479		if (skb_is_gso(skb)) {
 480			segs = netem_segment(skb, sch);
 481			if (!segs)
 482				return NET_XMIT_DROP;
 483		} else {
 484			segs = skb;
 
 485		}
 486
 487		skb = segs;
 488		segs = segs->next;
 489
 490		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 491		    (skb->ip_summed == CHECKSUM_PARTIAL &&
 492		     skb_checksum_help(skb))) {
 493			rc = qdisc_drop(skb, sch);
 
 
 494			goto finish_segs;
 495		}
 496
 497		skb->data[prandom_u32() % skb_headlen(skb)] ^=
 498			1<<(prandom_u32() % 8);
 499	}
 500
 501	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
 502		return qdisc_reshape_fail(skb, sch);
 
 
 
 
 503
 504	qdisc_qstats_backlog_inc(sch, skb);
 505
 506	cb = netem_skb_cb(skb);
 507	if (q->gap == 0 ||		/* not doing reordering */
 508	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 509	    q->reorder < get_crandom(&q->reorder_cor)) {
 510		psched_time_t now;
 511		psched_tdiff_t delay;
 512
 513		delay = tabledist(q->latency, q->jitter,
 514				  &q->delay_cor, q->delay_dist);
 515
 516		now = psched_get_time();
 517
 518		if (q->rate) {
 519			struct sk_buff *last;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 520
 521			if (!skb_queue_empty(&sch->q))
 522				last = skb_peek_tail(&sch->q);
 523			else
 524				last = netem_rb_to_skb(rb_last(&q->t_root));
 525			if (last) {
 526				/*
 527				 * Last packet in queue is reference point (now),
 528				 * calculate this time bonus and subtract
 529				 * from delay.
 530				 */
 531				delay -= netem_skb_cb(last)->time_to_send - now;
 532				delay = max_t(psched_tdiff_t, 0, delay);
 533				now = netem_skb_cb(last)->time_to_send;
 534			}
 535
 536			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
 537		}
 538
 539		cb->time_to_send = now + delay;
 540		cb->tstamp_save = skb->tstamp;
 541		++q->counter;
 542		tfifo_enqueue(skb, sch);
 543	} else {
 544		/*
 545		 * Do re-ordering by putting one out of N packets at the front
 546		 * of the queue.
 547		 */
 548		cb->time_to_send = psched_get_time();
 549		q->counter = 0;
 550
 551		__skb_queue_head(&sch->q, skb);
 552		sch->qstats.requeues++;
 553	}
 554
 555finish_segs:
 556	if (segs) {
 
 
 
 
 
 
 557		while (segs) {
 558			skb2 = segs->next;
 559			segs->next = NULL;
 560			qdisc_skb_cb(segs)->pkt_len = segs->len;
 561			last_len = segs->len;
 562			rc = qdisc_enqueue(segs, sch);
 563			if (rc != NET_XMIT_SUCCESS) {
 564				if (net_xmit_drop_count(rc))
 565					qdisc_qstats_drop(sch);
 566			} else {
 567				nb++;
 568				len += last_len;
 569			}
 570			segs = skb2;
 571		}
 572		sch->q.qlen += nb;
 573		if (nb > 1)
 574			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
 
 575	}
 576	return NET_XMIT_SUCCESS;
 577}
 578
 579static unsigned int netem_drop(struct Qdisc *sch)
 
 
 
 
 580{
 581	struct netem_sched_data *q = qdisc_priv(sch);
 582	unsigned int len;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 583
 584	len = qdisc_queue_drop(sch);
 
 
 
 585
 586	if (!len) {
 587		struct rb_node *p = rb_first(&q->t_root);
 
 
 588
 589		if (p) {
 590			struct sk_buff *skb = netem_rb_to_skb(p);
 
 
 
 
 591
 592			rb_erase(p, &q->t_root);
 593			sch->q.qlen--;
 594			skb->next = NULL;
 595			skb->prev = NULL;
 596			qdisc_qstats_backlog_dec(sch, skb);
 597			kfree_skb(skb);
 598		}
 
 599	}
 600	if (!len && q->qdisc && q->qdisc->ops->drop)
 601	    len = q->qdisc->ops->drop(q->qdisc);
 602	if (len)
 603		qdisc_qstats_drop(sch);
 604
 605	return len;
 606}
 607
 608static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 609{
 610	struct netem_sched_data *q = qdisc_priv(sch);
 611	struct sk_buff *skb;
 612	struct rb_node *p;
 613
 614	if (qdisc_is_throttled(sch))
 615		return NULL;
 616
 617tfifo_dequeue:
 618	skb = __skb_dequeue(&sch->q);
 619	if (skb) {
 620		qdisc_qstats_backlog_dec(sch, skb);
 621deliver:
 622		qdisc_unthrottled(sch);
 623		qdisc_bstats_update(sch, skb);
 624		return skb;
 625	}
 626	p = rb_first(&q->t_root);
 627	if (p) {
 628		psched_time_t time_to_send;
 629
 630		skb = netem_rb_to_skb(p);
 631
 632		/* if more time remaining? */
 633		time_to_send = netem_skb_cb(skb)->time_to_send;
 634		if (time_to_send <= psched_get_time()) {
 635			rb_erase(p, &q->t_root);
 636
 
 
 637			sch->q.qlen--;
 638			qdisc_qstats_backlog_dec(sch, skb);
 639			skb->next = NULL;
 640			skb->prev = NULL;
 641			skb->tstamp = netem_skb_cb(skb)->tstamp_save;
 
 
 
 642
 643#ifdef CONFIG_NET_CLS_ACT
 644			/*
 645			 * If it's at ingress let's pretend the delay is
 646			 * from the network (tstamp will be updated).
 647			 */
 648			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
 649				skb->tstamp.tv64 = 0;
 650#endif
 651
 652			if (q->qdisc) {
 653				int err = qdisc_enqueue(skb, q->qdisc);
 654
 655				if (unlikely(err != NET_XMIT_SUCCESS)) {
 656					if (net_xmit_drop_count(err)) {
 657						qdisc_qstats_drop(sch);
 658						qdisc_tree_reduce_backlog(sch, 1,
 659									  qdisc_pkt_len(skb));
 660					}
 661				}
 662				goto tfifo_dequeue;
 663			}
 664			goto deliver;
 665		}
 666
 667		if (q->qdisc) {
 668			skb = q->qdisc->ops->dequeue(q->qdisc);
 669			if (skb)
 670				goto deliver;
 671		}
 672		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
 673	}
 674
 675	if (q->qdisc) {
 676		skb = q->qdisc->ops->dequeue(q->qdisc);
 677		if (skb)
 678			goto deliver;
 679	}
 680	return NULL;
 681}
 682
 683static void netem_reset(struct Qdisc *sch)
 684{
 685	struct netem_sched_data *q = qdisc_priv(sch);
 686
 687	qdisc_reset_queue(sch);
 688	tfifo_reset(sch);
 689	if (q->qdisc)
 690		qdisc_reset(q->qdisc);
 691	qdisc_watchdog_cancel(&q->watchdog);
 692}
 693
 694static void dist_free(struct disttable *d)
 695{
 696	kvfree(d);
 697}
 698
 699/*
 700 * Distribution data is a variable-size payload containing
 701 * signed 16-bit values.
 702 */
 703static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 704{
 705	struct netem_sched_data *q = qdisc_priv(sch);
 706	size_t n = nla_len(attr)/sizeof(__s16);
 707	const __s16 *data = nla_data(attr);
 708	spinlock_t *root_lock;
 709	struct disttable *d;
 710	int i;
 711	size_t s;
 712
 713	if (n > NETEM_DIST_MAX)
 714		return -EINVAL;
 715
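	/* The table may be large (up to NETEM_DIST_MAX entries), so fall
	 * back to vmalloc() if a contiguous kmalloc() fails.
	 */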
 716	s = sizeof(struct disttable) + n * sizeof(s16);
 717	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
 718	if (!d)
 719		d = vmalloc(s);
 720	if (!d)
 721		return -ENOMEM;
 722
 723	d->size = n;
 724	for (i = 0; i < n; i++)
 725		d->table[i] = data[i];
 726
 727	root_lock = qdisc_root_sleeping_lock(sch);
 728
 729	spin_lock_bh(root_lock);
 730	swap(q->delay_dist, d);
 731	spin_unlock_bh(root_lock);
 732
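	/* after the swap, d points at the old table (or NULL): free it */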
 733	dist_free(d);
 734	return 0;
 735}
 736
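/* The helpers below copy fixed-size netlink attribute payloads into the
 * qdisc state; attribute sizes were already validated against
 * netem_policy.
 */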
 737static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 738{
 739	const struct tc_netem_corr *c = nla_data(attr);
 740
 741	init_crandom(&q->delay_cor, c->delay_corr);
 742	init_crandom(&q->loss_cor, c->loss_corr);
 743	init_crandom(&q->dup_cor, c->dup_corr);
 744}
 745
 746static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
 747{
 748	const struct tc_netem_reorder *r = nla_data(attr);
 749
 750	q->reorder = r->probability;
 751	init_crandom(&q->reorder_cor, r->correlation);
 752}
 753
 754static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
 755{
 756	const struct tc_netem_corrupt *r = nla_data(attr);
 757
 758	q->corrupt = r->probability;
 759	init_crandom(&q->corrupt_cor, r->correlation);
 760}
 761
 762static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
 763{
 764	const struct tc_netem_rate *r = nla_data(attr);
 765
 766	q->rate = r->rate;
 767	q->packet_overhead = r->packet_overhead;
 768	q->cell_size = r->cell_size;
 769	q->cell_overhead = r->cell_overhead;
 770	if (q->cell_size)
 771		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 772	else
 773		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
 774}
 775
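/* Parse the nested TCA_NETEM_LOSS attribute: NETEM_LOSS_GI configures
 * the 4-state Markov loss model, NETEM_LOSS_GE the Gilbert-Elliot
 * model; either way the transition probabilities end up in q->clg.
 */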
 776static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
 777{
 778	const struct nlattr *la;
 779	int rem;
 780
 781	nla_for_each_nested(la, attr, rem) {
 782		u16 type = nla_type(la);
 783
 784		switch (type) {
 785		case NETEM_LOSS_GI: {
 786			const struct tc_netem_gimodel *gi = nla_data(la);
 787
 788			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 789				pr_info("netem: incorrect gi model size\n");
 790				return -EINVAL;
 791			}
 792
 793			q->loss_model = CLG_4_STATES;
 794
 795			q->clg.state = TX_IN_GAP_PERIOD;
 796			q->clg.a1 = gi->p13;
 797			q->clg.a2 = gi->p31;
 798			q->clg.a3 = gi->p32;
 799			q->clg.a4 = gi->p14;
 800			q->clg.a5 = gi->p23;
 801			break;
 802		}
 803
 804		case NETEM_LOSS_GE: {
 805			const struct tc_netem_gemodel *ge = nla_data(la);
 806
 807			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 808				pr_info("netem: incorrect ge model size\n");
 809				return -EINVAL;
 810			}
 811
 812			q->loss_model = CLG_GILB_ELL;
 813			q->clg.state = GOOD_STATE;
 814			q->clg.a1 = ge->p;
 815			q->clg.a2 = ge->r;
 816			q->clg.a3 = ge->h;
 817			q->clg.a4 = ge->k1;
 818			break;
 819		}
 820
 821		default:
 822			pr_info("netem: unknown loss type %u\n", type);
 823			return -EINVAL;
 824		}
 825	}
 826
 827	return 0;
 828}
 829
 830static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 831	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 832	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 833	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 834	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 835	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 836	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 837	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 838};
 839
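/* netem options start with a struct tc_netem_qopt header followed by
 * optional netlink attributes, so skip the aligned header before
 * parsing the attribute stream.
 */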
 840static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 841		      const struct nla_policy *policy, int len)
 842{
 843	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 844
 845	if (nested_len < 0) {
 846		pr_info("netem: invalid attributes len %d\n", nested_len);
 847		return -EINVAL;
 848	}
 849
 850	if (nested_len >= nla_attr_size(0))
 851		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
 852				 nested_len, policy);
 853
 854	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 855	return 0;
 856}
 857
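/* Example (via the tc(8) front end):
 *   tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 * arrives here as a struct tc_netem_qopt (latency, jitter) followed by
 * a TCA_NETEM_CORR attribute carrying the delay correlation.
 */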
 858/* Parse netlink message to set options */
 859static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 860{
 861	struct netem_sched_data *q = qdisc_priv(sch);
 862	struct nlattr *tb[TCA_NETEM_MAX + 1];
 863	struct tc_netem_qopt *qopt;
 864	struct clgstate old_clg;
 865	int old_loss_model = CLG_RANDOM;
 866	int ret;
 867
 868	if (opt == NULL)
 869		return -EINVAL;
 870
 871	qopt = nla_data(opt);
 872	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 873	if (ret < 0)
 874		return ret;
 875
 876	/* backup q->clg and q->loss_model */
 877	old_clg = q->clg;
 878	old_loss_model = q->loss_model;
 879
 880	if (tb[TCA_NETEM_LOSS]) {
 881		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
 882		if (ret) {
 883			q->loss_model = old_loss_model;
 884			return ret;
 885		}
 886	} else {
 887		q->loss_model = CLG_RANDOM;
 888	}
 889
 890	if (tb[TCA_NETEM_DELAY_DIST]) {
 891		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
 892		if (ret) {
 893			/* restore clg and loss_model, in case
 894			 * q->clg and q->loss_model were modified
 895			 * by get_loss_clg()
 896			 */
 897			q->clg = old_clg;
 898			q->loss_model = old_loss_model;
 899			return ret;
 900		}
 901	}
 902
 903	sch->limit = qopt->limit;
 904
 905	q->latency = qopt->latency;
 906	q->jitter = qopt->jitter;
 907	q->limit = qopt->limit;
 908	q->gap = qopt->gap;
 909	q->counter = 0;
 910	q->loss = qopt->loss;
 911	q->duplicate = qopt->duplicate;
 912
 913	/* For compatibility with earlier versions:
 914	 * if a reorder gap is set, assume 100% reorder probability.
 915	 */
 916	if (q->gap)
 917		q->reorder = ~0;
 918
 919	if (tb[TCA_NETEM_CORR])
 920		get_correlation(q, tb[TCA_NETEM_CORR]);
 921
 922	if (tb[TCA_NETEM_REORDER])
 923		get_reorder(q, tb[TCA_NETEM_REORDER]);
 924
 925	if (tb[TCA_NETEM_CORRUPT])
 926		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
 927
 928	if (tb[TCA_NETEM_RATE])
 929		get_rate(q, tb[TCA_NETEM_RATE]);
 930
 931	if (tb[TCA_NETEM_RATE64])
 932		q->rate = max_t(u64, q->rate,
 933				nla_get_u64(tb[TCA_NETEM_RATE64]));
 934
 935	if (tb[TCA_NETEM_ECN])
 936		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 937
 938	return ret;
 939}
 940
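/* Called when the qdisc is first attached: initialize the watchdog,
 * default to the legacy random loss model, and let netem_change()
 * apply the initial options.
 */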
 941static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 942{
 943	struct netem_sched_data *q = qdisc_priv(sch);
 944	int ret;
 945
 946	if (!opt)
 947		return -EINVAL;
 948
 949	qdisc_watchdog_init(&q->watchdog, sch);
 950
 951	q->loss_model = CLG_RANDOM;
 952	ret = netem_change(sch, opt);
 953	if (ret)
 954		pr_info("netem: change failed\n");
 955	return ret;
 956}
 957
 958static void netem_destroy(struct Qdisc *sch)
 959{
 960	struct netem_sched_data *q = qdisc_priv(sch);
 961
 962	qdisc_watchdog_cancel(&q->watchdog);
 963	if (q->qdisc)
 964		qdisc_destroy(q->qdisc);
 965	dist_free(q->delay_dist);
 966}
 967
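/* Emit the configured loss model as a nested TCA_NETEM_LOSS attribute;
 * the legacy random model carries no data, so its nest is cancelled.
 */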
 968static int dump_loss_model(const struct netem_sched_data *q,
 969			   struct sk_buff *skb)
 970{
 971	struct nlattr *nest;
 972
 973	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
 974	if (nest == NULL)
 975		goto nla_put_failure;
 976
 977	switch (q->loss_model) {
 978	case CLG_RANDOM:
 979		/* legacy loss model */
 980		nla_nest_cancel(skb, nest);
 981		return 0;	/* no data */
 982
 983	case CLG_4_STATES: {
 984		struct tc_netem_gimodel gi = {
 985			.p13 = q->clg.a1,
 986			.p31 = q->clg.a2,
 987			.p32 = q->clg.a3,
 988			.p14 = q->clg.a4,
 989			.p23 = q->clg.a5,
 990		};
 991
 992		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
 993			goto nla_put_failure;
 994		break;
 995	}
 996	case CLG_GILB_ELL: {
 997		struct tc_netem_gemodel ge = {
 998			.p = q->clg.a1,
 999			.r = q->clg.a2,
1000			.h = q->clg.a3,
1001			.k1 = q->clg.a4,
1002		};
1003
1004		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
1005			goto nla_put_failure;
1006		break;
1007	}
1008	}
1009
1010	nla_nest_end(skb, nest);
1011	return 0;
1012
1013nla_put_failure:
1014	nla_nest_cancel(skb, nest);
1015	return -1;
1016}
1017
1018static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
1019{
1020	const struct netem_sched_data *q = qdisc_priv(sch);
1021	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
1022	struct tc_netem_qopt qopt;
1023	struct tc_netem_corr cor;
1024	struct tc_netem_reorder reorder;
1025	struct tc_netem_corrupt corrupt;
1026	struct tc_netem_rate rate;
1027
1028	qopt.latency = q->latency;
1029	qopt.jitter = q->jitter;
1030	qopt.limit = q->limit;
1031	qopt.loss = q->loss;
1032	qopt.gap = q->gap;
1033	qopt.duplicate = q->duplicate;
1034	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1035		goto nla_put_failure;
1036
1037	cor.delay_corr = q->delay_cor.rho;
1038	cor.loss_corr = q->loss_cor.rho;
1039	cor.dup_corr = q->dup_cor.rho;
1040	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1041		goto nla_put_failure;
1042
1043	reorder.probability = q->reorder;
1044	reorder.correlation = q->reorder_cor.rho;
1045	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1046		goto nla_put_failure;
1047
1048	corrupt.probability = q->corrupt;
1049	corrupt.correlation = q->corrupt_cor.rho;
1050	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1051		goto nla_put_failure;
1052
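	/* Rates at or above 2^32 do not fit the legacy 32-bit field:
	 * export the real value via TCA_NETEM_RATE64 and saturate
	 * rate.rate so that old userspace sees ~0U.
	 */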
1053	if (q->rate >= (1ULL << 32)) {
1054		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
1055			goto nla_put_failure;
1056		rate.rate = ~0U;
1057	} else {
1058		rate.rate = q->rate;
1059	}
1060	rate.packet_overhead = q->packet_overhead;
1061	rate.cell_size = q->cell_size;
1062	rate.cell_overhead = q->cell_overhead;
1063	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1064		goto nla_put_failure;
1065
1066	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1067		goto nla_put_failure;
1068
1069	if (dump_loss_model(q, skb) != 0)
1070		goto nla_put_failure;
1071
1072	return nla_nest_end(skb, nla);
1073
1074nla_put_failure:
1075	nlmsg_trim(skb, nla);
1076	return -1;
1077}
1078
1079static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
1080			  struct sk_buff *skb, struct tcmsg *tcm)
1081{
1082	struct netem_sched_data *q = qdisc_priv(sch);
1083
 1084	if (cl != 1 || !q->qdisc)	/* only one class */
1085		return -ENOENT;
1086
1087	tcm->tcm_handle |= TC_H_MIN(1);
1088	tcm->tcm_info = q->qdisc->handle;
1089
1090	return 0;
1091}
1092
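/* netem exposes a single pseudo-class (minor 1) so that a child qdisc
 * can be grafted in for classful setups.
 */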
1093static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1094		     struct Qdisc **old)
1095{
1096	struct netem_sched_data *q = qdisc_priv(sch);
1097
1098	*old = qdisc_replace(sch, new, &q->qdisc);
1099	return 0;
1100}
1101
1102static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
1103{
1104	struct netem_sched_data *q = qdisc_priv(sch);
1105	return q->qdisc;
1106}
1107
1108static unsigned long netem_get(struct Qdisc *sch, u32 classid)
1109{
1110	return 1;
1111}
1112
1113static void netem_put(struct Qdisc *sch, unsigned long arg)
1114{
1115}
1116
1117static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
1118{
1119	if (!walker->stop) {
1120		if (walker->count >= walker->skip)
1121			if (walker->fn(sch, 1, walker) < 0) {
1122				walker->stop = 1;
1123				return;
1124			}
1125		walker->count++;
1126	}
1127}
1128
1129static const struct Qdisc_class_ops netem_class_ops = {
1130	.graft		=	netem_graft,
1131	.leaf		=	netem_leaf,
1132	.get		=	netem_get,
1133	.put		=	netem_put,
1134	.walk		=	netem_walk,
1135	.dump		=	netem_dump_class,
1136};
1137
1138static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1139	.id		=	"netem",
1140	.cl_ops		=	&netem_class_ops,
1141	.priv_size	=	sizeof(struct netem_sched_data),
1142	.enqueue	=	netem_enqueue,
1143	.dequeue	=	netem_dequeue,
1144	.peek		=	qdisc_peek_dequeued,
1145	.drop		=	netem_drop,
1146	.init		=	netem_init,
1147	.reset		=	netem_reset,
1148	.destroy	=	netem_destroy,
1149	.change		=	netem_change,
1150	.dump		=	netem_dump,
1151	.owner		=	THIS_MODULE,
1152};
1153
1154
1155static int __init netem_module_init(void)
1156{
1157	pr_info("netem: version " VERSION "\n");
1158	return register_qdisc(&netem_qdisc_ops);
1159}
1160static void __exit netem_module_exit(void)
1161{
1162	unregister_qdisc(&netem_qdisc_ops);
1163}
1164module_init(netem_module_init)
1165module_exit(netem_module_exit)
1166MODULE_LICENSE("GPL");