v3.5.6
 
   1/*
   2 * net/sched/sch_netem.c	Network emulator
   3 *
   4 * 		This program is free software; you can redistribute it and/or
   5 * 		modify it under the terms of the GNU General Public License
   6 * 		as published by the Free Software Foundation; either version
   7 * 		2 of the License.
   8 *
   9 *  		Many of the algorithms and ideas for this came from
  10 *		NIST Net which is not copyrighted.
  11 *
  12 * Authors:	Stephen Hemminger <shemminger@osdl.org>
  13 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/module.h>
  18#include <linux/slab.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/rtnetlink.h>
  25#include <linux/reciprocal_div.h>
  26
  27#include <net/netlink.h>
  28#include <net/pkt_sched.h>
  29#include <net/inet_ecn.h>
  30
  31#define VERSION "1.3"
  32
  33/*	Network Emulation Queuing algorithm.
  34	====================================
  35
  36	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
   37		 Network Emulation Tool"
  38		 [2] Luigi Rizzo, DummyNet for FreeBSD
  39
  40	 ----------------------------------------------------------------
  41
  42	 This started out as a simple way to delay outgoing packets to
  43	 test TCP but has grown to include most of the functionality
  44	 of a full blown network emulator like NISTnet. It can delay
  45	 packets and add random jitter (and correlation). The random
  46	 distribution can be loaded from a table as well to provide
  47	 normal, Pareto, or experimental curves. Packet loss,
  48	 duplication, and reordering can also be emulated.
  49
  50	 This qdisc does not do classification that can be handled in
  51	 layering other disciplines.  It does not need to do bandwidth
  52	 control either since that can be handled by using token
  53	 bucket or other rate control.
  54
  55     Correlated Loss Generator models
  56
  57	Added generation of correlated loss according to the
  58	"Gilbert-Elliot" model, a 4-state markov model.
  59
  60	References:
  61	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  62	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  63	and intuitive loss model for packet networks and its implementation
  64	in the Netem module in the Linux kernel", available in [1]
  65
  66	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  67		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  68*/
  69
  70struct netem_sched_data {
  71	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */
  72
  73	/* optional qdisc for classful handling (NULL at netem init) */
  74	struct Qdisc	*qdisc;
  75
  76	struct qdisc_watchdog watchdog;
  77
  78	psched_tdiff_t latency;
  79	psched_tdiff_t jitter;
  80
  81	u32 loss;
  82	u32 ecn;
  83	u32 limit;
  84	u32 counter;
  85	u32 gap;
  86	u32 duplicate;
  87	u32 reorder;
  88	u32 corrupt;
  89	u32 rate;
  90	s32 packet_overhead;
  91	u32 cell_size;
  92	u32 cell_size_reciprocal;
  93	s32 cell_overhead;
  94
  95	struct crndstate {
  96		u32 last;
  97		u32 rho;
  98	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
  99
 100	struct disttable {
 101		u32  size;
 102		s16 table[0];
 103	} *delay_dist;
 104
 105	enum  {
 106		CLG_RANDOM,
 107		CLG_4_STATES,
 108		CLG_GILB_ELL,
 109	} loss_model;
 110
 111	/* Correlated Loss Generation models */
 112	struct clgstate {
 113		/* state of the Markov chain */
 114		u8 state;
 115
 116		/* 4-states and Gilbert-Elliot models */
 117		u32 a1;	/* p13 for 4-states or p for GE */
 118		u32 a2;	/* p31 for 4-states or r for GE */
 119		u32 a3;	/* p32 for 4-states or h for GE */
 120		u32 a4;	/* p14 for 4-states or 1-k for GE */
 121		u32 a5; /* p23 used only in 4-states */
 122	} clg;
 123
 124};
 125
 126/* Time stamp put into socket buffer control block
 127 * Only valid when skbs are in our internal t(ime)fifo queue.
 128 */
 129struct netem_skb_cb {
 130	psched_time_t	time_to_send;
 131};
 132
 133static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 134{
 135	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 136	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 137}
 138
 139/* init_crandom - initialize correlated random number generator
 140 * Use entropy source for initial seed.
 141 */
 142static void init_crandom(struct crndstate *state, unsigned long rho)
 143{
 144	state->rho = rho;
 145	state->last = net_random();
 146}
 147
 148/* get_crandom - correlated random number generator
 149 * Next number depends on last value.
 150 * rho is scaled to avoid floating point.
 151 */
 152static u32 get_crandom(struct crndstate *state)
 153{
 154	u64 value, rho;
 155	unsigned long answer;
 156
 157	if (state->rho == 0)	/* no correlation */
 158		return net_random();
 159
 160	value = net_random();
 161	rho = (u64)state->rho + 1;
 162	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 163	state->last = answer;
 164	return answer;
 165}
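
A minimal user-space sketch of the mixing step above, assuming rand() as a stand-in for net_random() and a file-scope variable in place of state->last: each output is the fixed-point convex combination ((2^32 - r)*U + r*last) >> 32 with r = rho + 1, so the two weights always sum to 2^32 and the 64-bit sum cannot overflow.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t last;			/* plays the role of state->last */

	static uint32_t crandom(uint32_t rho)
	{
		uint64_t value = (uint32_t)rand();	/* stand-in for net_random() */
		uint64_t r = (uint64_t)rho + 1;

		/* weighted mix of fresh entropy and the previous output */
		last = (value * ((1ull << 32) - r) + (uint64_t)last * r) >> 32;
		return last;
	}

	int main(void)
	{
		last = (uint32_t)rand();	/* entropy seed, as in init_crandom() */
		for (int i = 0; i < 8; i++)
			printf("%u\n", crandom(0x80000000u));	/* rho = 0.5 in fixed point */
		return 0;
	}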
 166
 167/* loss_4state - 4-state model loss generator
 168 * Generates losses according to the 4-state Markov chain adopted in
 169 * the GI (General and Intuitive) loss model.
 170 */
 171static bool loss_4state(struct netem_sched_data *q)
 172{
 173	struct clgstate *clg = &q->clg;
 174	u32 rnd = net_random();
 175
 176	/*
 177	 * Makes a comparison between rnd and the transition
 178	 * probabilities outgoing from the current state, then decides the
 179	 * next state and if the next packet has to be transmitted or lost.
 180	 * The four states correspond to:
 181	 *   1 => successfully transmitted packets within a gap period
 182	 *   4 => isolated losses within a gap period
 183	 *   3 => lost packets within a burst period
 184	 *   2 => successfully transmitted packets within a burst period
 185	 */
 186	switch (clg->state) {
 187	case 1:
 188		if (rnd < clg->a4) {
 189			clg->state = 4;
 190			return true;
 191		} else if (clg->a4 < rnd && rnd < clg->a1) {
 192			clg->state = 3;
 193			return true;
 194		} else if (clg->a1 < rnd)
 195			clg->state = 1;
 196
 197		break;
 198	case 2:
 199		if (rnd < clg->a5) {
 200			clg->state = 3;
 201			return true;
 202		} else
 203			clg->state = 2;
 204
 205		break;
 206	case 3:
 207		if (rnd < clg->a3)
 208			clg->state = 2;
 209		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 210			clg->state = 1;
 211			return true;
 212		} else if (clg->a2 + clg->a3 < rnd) {
 213			clg->state = 3;
 214			return true;
 215		}
 216		break;
 217	case 4:
 218		clg->state = 1;
 219		break;
 220	}
 221
 222	return false;
 223}
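
Note that a1..a5 are compared directly against a 32-bit uniform draw, so each parameter is a probability in fixed point scaled by 2^32: an isolated-loss probability of 1%, for example, corresponds to a4 = 0.01 * 2^32, about 42949673, making the rnd < clg->a4 branch fire on roughly 1% of packets.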
 224
 225/* loss_gilb_ell - Gilbert-Elliot model loss generator
 226 * Generates losses according to the Gilbert-Elliot loss model or
 227 * its special cases  (Gilbert or Simple Gilbert)
 228 *
 229 * Makes a comparison between random number and the transition
 230 * probabilities outgoing from the current state, then decides the
 231 * next state. A second random number is extracted and the comparison
 232 * with the loss probability of the current state decides if the next
 233 * packet will be transmitted or lost.
 234 */
 235static bool loss_gilb_ell(struct netem_sched_data *q)
 236{
 237	struct clgstate *clg = &q->clg;
 238
 239	switch (clg->state) {
 240	case 1:
 241		if (net_random() < clg->a1)
 242			clg->state = 2;
 243		if (net_random() < clg->a4)
 244			return true;
 245	case 2:
 246		if (net_random() < clg->a2)
 247			clg->state = 1;
 248		if (clg->a3 > net_random())
 249			return true;
 250	}
 251
 252	return false;
 253}
 254
 255static bool loss_event(struct netem_sched_data *q)
 256{
 257	switch (q->loss_model) {
 258	case CLG_RANDOM:
 259		/* Random packet drop 0 => none, ~0 => all */
 260		return q->loss && q->loss >= get_crandom(&q->loss_cor);
 261
 262	case CLG_4_STATES:
 263		/* 4state loss model algorithm (used also for GI model)
 264		* Extracts a value from the markov 4 state loss generator,
 265		* if it is 1 drops a packet and if needed writes the event in
 266		* the kernel logs
 267		*/
 268		return loss_4state(q);
 269
 270	case CLG_GILB_ELL:
 271		/* Gilbert-Elliot loss model algorithm
 272		* Extracts a value from the Gilbert-Elliot loss generator,
 273		* if it is 1 drops a packet and if needed writes the event in
 274		* the kernel logs
 275		*/
 276		return loss_gilb_ell(q);
 277	}
 278
 279	return false;	/* not reached */
 280}
 281
 282
 283/* tabledist - return a pseudo-randomly distributed value with mean mu and
 284 * std deviation sigma.  Uses table lookup to approximate the desired
 285 * distribution, and a uniformly-distributed pseudo-random source.
 286 */
 287static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
 288				struct crndstate *state,
 289				const struct disttable *dist)
 290{
 291	psched_tdiff_t x;
 292	long t;
 293	u32 rnd;
 294
 295	if (sigma == 0)
 296		return mu;
 297
 298	rnd = get_crandom(state);
 299
 300	/* default uniform distribution */
 301	if (dist == NULL)
 302		return (rnd % (2*sigma)) - sigma + mu;
 303
 304	t = dist->table[rnd % dist->size];
 305	x = (sigma % NETEM_DIST_SCALE) * t;
 306	if (x >= 0)
 307		x += NETEM_DIST_SCALE/2;
 308	else
 309		x -= NETEM_DIST_SCALE/2;
 310
 311	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 312}
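
The final expression computes mu + t*sigma/NETEM_DIST_SCALE while avoiding overflow: sigma is split into quotient and remainder by S = NETEM_DIST_SCALE (8192 in pkt_sched.h), the remainder term (sigma % S)*t is rounded half-up via the +/- S/2 adjustment, and the quotient term (sigma / S)*t needs no division at all. Worked example with sigma = 10000 and t = 4096: x = 1808*4096 + 4096 = 7409664, x/S = 904, and (10000/8192)*4096 = 4096, giving mu + 5000, exactly t*sigma/S.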
 313
 314static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 315{
 316	u64 ticks;
 317
 318	len += q->packet_overhead;
 319
 320	if (q->cell_size) {
 321		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 322
 323		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 324			cells++;
 325		len = cells * (q->cell_size + q->cell_overhead);
 326	}
 327
 328	ticks = (u64)len * NSEC_PER_SEC;
 329
 330	do_div(ticks, q->rate);
 331	return PSCHED_NS2TICKS(ticks);
 332}
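
A hedged user-space sketch of the same conversion, assuming q->rate is in bytes per second (the unit iproute2 passes for the rate option) and ignoring the cell rounding above:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t rate = 125000;		/* assumed bytes/sec, i.e. 1 Mbit/s */
		uint64_t len  = 1500;		/* packet length in bytes */

		/* len * NSEC_PER_SEC / rate, as in the function body */
		printf("%llu ns\n", (unsigned long long)(len * NSEC_PER_SEC / rate));
		/* prints 12000000: a 1500-byte frame occupies 12 ms of link time */
		return 0;
	}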
 333
 334static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 335{
 336	struct sk_buff_head *list = &sch->q;
 337	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
 338	struct sk_buff *skb = skb_peek_tail(list);
 339
 340	/* Optimize for add at tail */
 341	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
 342		return __skb_queue_tail(list, nskb);
 343
 344	skb_queue_reverse_walk(list, skb) {
 345		if (tnext >= netem_skb_cb(skb)->time_to_send)
 346			break;
 347	}
 348
 349	__skb_queue_after(list, skb, nskb);
 350}
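
tfifo_enqueue() keeps sch->q sorted by time_to_send: the common in-order arrival appends in O(1), and only out-of-order packets pay for the reverse walk. Because both comparisons use >=, packets with equal timestamps keep their FIFO order.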
 351
 352/*
 353 * Insert one skb into qdisc.
 354 * Note: parent depends on return value to account for queue length.
 355 * 	NET_XMIT_DROP: queue length didn't change.
 356 *      NET_XMIT_SUCCESS: one skb was queued.
 357 */
 358static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 359{
 360	struct netem_sched_data *q = qdisc_priv(sch);
 361	/* We don't fill cb now as skb_unshare() may invalidate it */
 362	struct netem_skb_cb *cb;
 363	struct sk_buff *skb2;
 364	int count = 1;
 365
 366	/* Random duplication */
 367	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 368		++count;
 369
 370	/* Drop packet? */
 371	if (loss_event(q)) {
 372		if (q->ecn && INET_ECN_set_ce(skb))
 373			sch->qstats.drops++; /* mark packet */
 374		else
 375			--count;
 376	}
 377	if (count == 0) {
 378		sch->qstats.drops++;
 379		kfree_skb(skb);
 380		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 381	}
 382
 383	skb_orphan(skb);
 384
 385	/*
 386	 * If we need to duplicate packet, then re-insert at top of the
 387	 * qdisc tree, since parent queuer expects that only one
 388	 * skb will be queued.
 389	 */
 390	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 391		struct Qdisc *rootq = qdisc_root(sch);
 392		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 393		q->duplicate = 0;
 394
 395		qdisc_enqueue_root(skb2, rootq);
 396		q->duplicate = dupsave;
 397	}
 398
 399	/*
 400	 * Randomized packet corruption.
 401	 * Make copy if needed since we are modifying
 402	 * If packet is going to be hardware checksummed, then
 403	 * do it now in software before we mangle it.
 404	 */
 405	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 406		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 407		    (skb->ip_summed == CHECKSUM_PARTIAL &&
 408		     skb_checksum_help(skb)))
 409			return qdisc_drop(skb, sch);
 410
 411		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 412	}
 413
 414	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
 415		return qdisc_reshape_fail(skb, sch);
 416
 417	sch->qstats.backlog += qdisc_pkt_len(skb);
 418
 419	cb = netem_skb_cb(skb);
 420	if (q->gap == 0 ||		/* not doing reordering */
 421	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 422	    q->reorder < get_crandom(&q->reorder_cor)) {
 423		psched_time_t now;
 424		psched_tdiff_t delay;
 425
 426		delay = tabledist(q->latency, q->jitter,
 427				  &q->delay_cor, q->delay_dist);
 428
 429		now = psched_get_time();
 430
 431		if (q->rate) {
 432			struct sk_buff_head *list = &sch->q;
 433
 434			delay += packet_len_2_sched_time(skb->len, q);
 435
 436			if (!skb_queue_empty(list)) {
 437				/*
 438				 * Last packet in queue is reference point (now).
 439				 * First packet in queue is already in flight,
  440				 * calculate this time bonus and subtract
 441				 * from delay.
 442				 */
 443				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
 444				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
 445			}
 446		}
 447
 448		cb->time_to_send = now + delay;
 449		++q->counter;
 450		tfifo_enqueue(skb, sch);
 451	} else {
 452		/*
 453		 * Do re-ordering by putting one out of N packets at the front
 454		 * of the queue.
 455		 */
 456		cb->time_to_send = psched_get_time();
 457		q->counter = 0;
 458
 459		__skb_queue_head(&sch->q, skb);
 460		sch->qstats.requeues++;
 461	}
 462
 463	return NET_XMIT_SUCCESS;
 464}
 465
 466static unsigned int netem_drop(struct Qdisc *sch)
 467{
 468	struct netem_sched_data *q = qdisc_priv(sch);
 469	unsigned int len;
 470
 471	len = qdisc_queue_drop(sch);
 472	if (!len && q->qdisc && q->qdisc->ops->drop)
 473	    len = q->qdisc->ops->drop(q->qdisc);
 474	if (len)
 475		sch->qstats.drops++;
 476
 477	return len;
 478}
 479
 480static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 481{
 482	struct netem_sched_data *q = qdisc_priv(sch);
 483	struct sk_buff *skb;
 484
 485	if (qdisc_is_throttled(sch))
 486		return NULL;
 487
 488tfifo_dequeue:
 489	skb = qdisc_peek_head(sch);
 490	if (skb) {
 491		const struct netem_skb_cb *cb = netem_skb_cb(skb);
 492
 493		/* if more time remaining? */
 494		if (cb->time_to_send <= psched_get_time()) {
 495			__skb_unlink(skb, &sch->q);
 496			sch->qstats.backlog -= qdisc_pkt_len(skb);
 497
 498#ifdef CONFIG_NET_CLS_ACT
 499			/*
 500			 * If it's at ingress let's pretend the delay is
 501			 * from the network (tstamp will be updated).
 502			 */
 503			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
 504				skb->tstamp.tv64 = 0;
 505#endif
 506
 507			if (q->qdisc) {
 508				int err = qdisc_enqueue(skb, q->qdisc);
 509
 510				if (unlikely(err != NET_XMIT_SUCCESS)) {
 511					if (net_xmit_drop_count(err)) {
 512						sch->qstats.drops++;
 513						qdisc_tree_decrease_qlen(sch, 1);
 514					}
 515				}
 516				goto tfifo_dequeue;
 517			}
 518deliver:
 519			qdisc_unthrottled(sch);
 520			qdisc_bstats_update(sch, skb);
 521			return skb;
 522		}
 523
 524		if (q->qdisc) {
 525			skb = q->qdisc->ops->dequeue(q->qdisc);
 526			if (skb)
 527				goto deliver;
 528		}
 529		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 530	}
 531
 532	if (q->qdisc) {
 533		skb = q->qdisc->ops->dequeue(q->qdisc);
 534		if (skb)
 535			goto deliver;
 536	}
 537	return NULL;
 538}
 539
 540static void netem_reset(struct Qdisc *sch)
 541{
 542	struct netem_sched_data *q = qdisc_priv(sch);
 543
 544	qdisc_reset_queue(sch);
 545	if (q->qdisc)
 546		qdisc_reset(q->qdisc);
 547	qdisc_watchdog_cancel(&q->watchdog);
 548}
 549
 550static void dist_free(struct disttable *d)
 551{
 552	if (d) {
 553		if (is_vmalloc_addr(d))
 554			vfree(d);
 555		else
 556			kfree(d);
 557	}
 558}
 559
 560/*
 561 * Distribution data is a variable size payload containing
 562 * signed 16 bit values.
 563 */
 564static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 565{
 566	struct netem_sched_data *q = qdisc_priv(sch);
 567	size_t n = nla_len(attr)/sizeof(__s16);
 568	const __s16 *data = nla_data(attr);
 569	spinlock_t *root_lock;
 570	struct disttable *d;
 571	int i;
 572	size_t s;
 573
 574	if (n > NETEM_DIST_MAX)
 575		return -EINVAL;
 576
 577	s = sizeof(struct disttable) + n * sizeof(s16);
 578	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
 579	if (!d)
 580		d = vmalloc(s);
 581	if (!d)
 582		return -ENOMEM;
 583
 584	d->size = n;
 585	for (i = 0; i < n; i++)
 586		d->table[i] = data[i];
 587
 588	root_lock = qdisc_root_sleeping_lock(sch);
 589
 590	spin_lock_bh(root_lock);
 591	swap(q->delay_dist, d);
 592	spin_unlock_bh(root_lock);
 593
 594	dist_free(d);
 595	return 0;
 596}
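
The table entries are unitless samples of the target distribution, scaled by NETEM_DIST_SCALE and consumed by tabledist() above; in practice they come from the precomputed tables shipped with iproute2 (normal, pareto, paretonormal, generated by its maketable utility), which tc sends down as the TCA_NETEM_DELAY_DIST attribute.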
 597
 598static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
 599{
 600	struct netem_sched_data *q = qdisc_priv(sch);
 601	const struct tc_netem_corr *c = nla_data(attr);
 602
 603	init_crandom(&q->delay_cor, c->delay_corr);
 604	init_crandom(&q->loss_cor, c->loss_corr);
 605	init_crandom(&q->dup_cor, c->dup_corr);
 606}
 607
 608static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
 609{
 610	struct netem_sched_data *q = qdisc_priv(sch);
 611	const struct tc_netem_reorder *r = nla_data(attr);
 612
 613	q->reorder = r->probability;
 614	init_crandom(&q->reorder_cor, r->correlation);
 615}
 616
 617static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
 618{
 619	struct netem_sched_data *q = qdisc_priv(sch);
 620	const struct tc_netem_corrupt *r = nla_data(attr);
 621
 622	q->corrupt = r->probability;
 623	init_crandom(&q->corrupt_cor, r->correlation);
 624}
 625
 626static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
 627{
 628	struct netem_sched_data *q = qdisc_priv(sch);
 629	const struct tc_netem_rate *r = nla_data(attr);
 630
 631	q->rate = r->rate;
 632	q->packet_overhead = r->packet_overhead;
 633	q->cell_size = r->cell_size;
 634	if (q->cell_size)
 635		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 636	q->cell_overhead = r->cell_overhead;
 637}
 638
 639static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 640{
 641	struct netem_sched_data *q = qdisc_priv(sch);
 642	const struct nlattr *la;
 643	int rem;
 644
 645	nla_for_each_nested(la, attr, rem) {
 646		u16 type = nla_type(la);
 647
 648		switch(type) {
 649		case NETEM_LOSS_GI: {
 650			const struct tc_netem_gimodel *gi = nla_data(la);
 651
 652			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 653				pr_info("netem: incorrect gi model size\n");
 654				return -EINVAL;
 655			}
 656
 657			q->loss_model = CLG_4_STATES;
 658
 659			q->clg.state = 1;
 660			q->clg.a1 = gi->p13;
 661			q->clg.a2 = gi->p31;
 662			q->clg.a3 = gi->p32;
 663			q->clg.a4 = gi->p14;
 664			q->clg.a5 = gi->p23;
 665			break;
 666		}
 667
 668		case NETEM_LOSS_GE: {
 669			const struct tc_netem_gemodel *ge = nla_data(la);
 670
 671			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 672				pr_info("netem: incorrect ge model size\n");
 673				return -EINVAL;
 674			}
 675
 676			q->loss_model = CLG_GILB_ELL;
 677			q->clg.state = 1;
 678			q->clg.a1 = ge->p;
 679			q->clg.a2 = ge->r;
 680			q->clg.a3 = ge->h;
 681			q->clg.a4 = ge->k1;
 682			break;
 683		}
 684
 685		default:
 686			pr_info("netem: unknown loss type %u\n", type);
 687			return -EINVAL;
 688		}
 689	}
 690
 691	return 0;
 692}
 693
 694static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 695	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 696	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 697	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 698	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 699	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 700	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 701};
 702
 703static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 704		      const struct nla_policy *policy, int len)
 705{
 706	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 707
 708	if (nested_len < 0) {
 709		pr_info("netem: invalid attributes len %d\n", nested_len);
 710		return -EINVAL;
 711	}
 712
 713	if (nested_len >= nla_attr_size(0))
 714		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
 715				 nested_len, policy);
 716
 717	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 718	return 0;
 719}
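
netem's options are laid out as a struct tc_netem_qopt header followed by optional netlink attributes, so parse_attr() begins parsing NLA_ALIGN(len) bytes into the payload; a message from an old tc binary that carries only the bare qopt lands in the memset() branch, leaving every attribute pointer NULL.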
 720
 721/* Parse netlink message to set options */
 722static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 723{
 724	struct netem_sched_data *q = qdisc_priv(sch);
 725	struct nlattr *tb[TCA_NETEM_MAX + 1];
 726	struct tc_netem_qopt *qopt;
 727	int ret;
 728
 729	if (opt == NULL)
 730		return -EINVAL;
 731
 732	qopt = nla_data(opt);
 733	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 734	if (ret < 0)
 735		return ret;
 736
 737	sch->limit = qopt->limit;
 738
 739	q->latency = qopt->latency;
 740	q->jitter = qopt->jitter;
 741	q->limit = qopt->limit;
 742	q->gap = qopt->gap;
 743	q->counter = 0;
 744	q->loss = qopt->loss;
 745	q->duplicate = qopt->duplicate;
 746
 747	/* for compatibility with earlier versions.
 748	 * if gap is set, need to assume 100% probability
 749	 */
 750	if (q->gap)
 751		q->reorder = ~0;
 752
 753	if (tb[TCA_NETEM_CORR])
 754		get_correlation(sch, tb[TCA_NETEM_CORR]);
 755
 756	if (tb[TCA_NETEM_DELAY_DIST]) {
 757		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
 758		if (ret)
 759			return ret;
 760	}
 761
 762	if (tb[TCA_NETEM_REORDER])
 763		get_reorder(sch, tb[TCA_NETEM_REORDER]);
 764
 765	if (tb[TCA_NETEM_CORRUPT])
 766		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 767
 768	if (tb[TCA_NETEM_RATE])
 769		get_rate(sch, tb[TCA_NETEM_RATE]);
 770
 771	if (tb[TCA_NETEM_ECN])
 772		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 773
 774	q->loss_model = CLG_RANDOM;
 775	if (tb[TCA_NETEM_LOSS])
 776		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
 777
 778	return ret;
 779}
 780
 781static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 782{
 783	struct netem_sched_data *q = qdisc_priv(sch);
 784	int ret;
 785
 786	if (!opt)
 787		return -EINVAL;
 788
 789	qdisc_watchdog_init(&q->watchdog, sch);
 790
 791	q->loss_model = CLG_RANDOM;
 792	ret = netem_change(sch, opt);
 793	if (ret)
 794		pr_info("netem: change failed\n");
 795	return ret;
 796}
 797
 798static void netem_destroy(struct Qdisc *sch)
 799{
 800	struct netem_sched_data *q = qdisc_priv(sch);
 801
 802	qdisc_watchdog_cancel(&q->watchdog);
 803	if (q->qdisc)
 804		qdisc_destroy(q->qdisc);
 805	dist_free(q->delay_dist);
 806}
 807
 808static int dump_loss_model(const struct netem_sched_data *q,
 809			   struct sk_buff *skb)
 810{
 811	struct nlattr *nest;
 812
 813	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
 814	if (nest == NULL)
 815		goto nla_put_failure;
 816
 817	switch (q->loss_model) {
 818	case CLG_RANDOM:
 819		/* legacy loss model */
 820		nla_nest_cancel(skb, nest);
 821		return 0;	/* no data */
 822
 823	case CLG_4_STATES: {
 824		struct tc_netem_gimodel gi = {
 825			.p13 = q->clg.a1,
 826			.p31 = q->clg.a2,
 827			.p32 = q->clg.a3,
 828			.p14 = q->clg.a4,
 829			.p23 = q->clg.a5,
 830		};
 831
 832		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
 833			goto nla_put_failure;
 834		break;
 835	}
 836	case CLG_GILB_ELL: {
 837		struct tc_netem_gemodel ge = {
 838			.p = q->clg.a1,
 839			.r = q->clg.a2,
 840			.h = q->clg.a3,
 841			.k1 = q->clg.a4,
 842		};
 843
 844		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
 845			goto nla_put_failure;
 846		break;
 847	}
 848	}
 849
 850	nla_nest_end(skb, nest);
 851	return 0;
 852
 853nla_put_failure:
 854	nla_nest_cancel(skb, nest);
 855	return -1;
 856}
 857
 858static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 859{
 860	const struct netem_sched_data *q = qdisc_priv(sch);
 861	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
 862	struct tc_netem_qopt qopt;
 863	struct tc_netem_corr cor;
 864	struct tc_netem_reorder reorder;
 865	struct tc_netem_corrupt corrupt;
 866	struct tc_netem_rate rate;
 867
 868	qopt.latency = q->latency;
 869	qopt.jitter = q->jitter;
 870	qopt.limit = q->limit;
 871	qopt.loss = q->loss;
 872	qopt.gap = q->gap;
 873	qopt.duplicate = q->duplicate;
 874	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
 875		goto nla_put_failure;
 876
 877	cor.delay_corr = q->delay_cor.rho;
 878	cor.loss_corr = q->loss_cor.rho;
 879	cor.dup_corr = q->dup_cor.rho;
 880	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
 881		goto nla_put_failure;
 882
 883	reorder.probability = q->reorder;
 884	reorder.correlation = q->reorder_cor.rho;
 885	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
 886		goto nla_put_failure;
 887
 888	corrupt.probability = q->corrupt;
 889	corrupt.correlation = q->corrupt_cor.rho;
 890	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
 891		goto nla_put_failure;
 892
 893	rate.rate = q->rate;
 894	rate.packet_overhead = q->packet_overhead;
 895	rate.cell_size = q->cell_size;
 896	rate.cell_overhead = q->cell_overhead;
 897	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
 898		goto nla_put_failure;
 899
 900	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
 901		goto nla_put_failure;
 902
 903	if (dump_loss_model(q, skb) != 0)
 904		goto nla_put_failure;
 905
 906	return nla_nest_end(skb, nla);
 907
 908nla_put_failure:
 909	nlmsg_trim(skb, nla);
 910	return -1;
 911}
 912
 913static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 914			  struct sk_buff *skb, struct tcmsg *tcm)
 915{
 916	struct netem_sched_data *q = qdisc_priv(sch);
 917
 918	if (cl != 1 || !q->qdisc) 	/* only one class */
 919		return -ENOENT;
 920
 921	tcm->tcm_handle |= TC_H_MIN(1);
 922	tcm->tcm_info = q->qdisc->handle;
 923
 924	return 0;
 925}
 926
 927static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 928		     struct Qdisc **old)
 929{
 930	struct netem_sched_data *q = qdisc_priv(sch);
 931
 932	sch_tree_lock(sch);
 933	*old = q->qdisc;
 934	q->qdisc = new;
 935	if (*old) {
 936		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 937		qdisc_reset(*old);
 938	}
 939	sch_tree_unlock(sch);
 940
 941	return 0;
 942}
 943
 944static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
 945{
 946	struct netem_sched_data *q = qdisc_priv(sch);
 947	return q->qdisc;
 948}
 949
 950static unsigned long netem_get(struct Qdisc *sch, u32 classid)
 951{
 952	return 1;
 953}
 954
 955static void netem_put(struct Qdisc *sch, unsigned long arg)
 956{
 957}
 958
 959static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 960{
 961	if (!walker->stop) {
 962		if (walker->count >= walker->skip)
 963			if (walker->fn(sch, 1, walker) < 0) {
 964				walker->stop = 1;
 965				return;
 966			}
 967		walker->count++;
 968	}
 969}
 970
 971static const struct Qdisc_class_ops netem_class_ops = {
 972	.graft		=	netem_graft,
 973	.leaf		=	netem_leaf,
 974	.get		=	netem_get,
 975	.put		=	netem_put,
 976	.walk		=	netem_walk,
 977	.dump		=	netem_dump_class,
 978};
 979
 980static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 981	.id		=	"netem",
 982	.cl_ops		=	&netem_class_ops,
 983	.priv_size	=	sizeof(struct netem_sched_data),
 984	.enqueue	=	netem_enqueue,
 985	.dequeue	=	netem_dequeue,
 986	.peek		=	qdisc_peek_dequeued,
 987	.drop		=	netem_drop,
 988	.init		=	netem_init,
 989	.reset		=	netem_reset,
 990	.destroy	=	netem_destroy,
 991	.change		=	netem_change,
 992	.dump		=	netem_dump,
 993	.owner		=	THIS_MODULE,
 994};
 995
 996
 997static int __init netem_module_init(void)
 998{
 999	pr_info("netem: version " VERSION "\n");
1000	return register_qdisc(&netem_qdisc_ops);
1001}
1002static void __exit netem_module_exit(void)
1003{
1004	unregister_qdisc(&netem_qdisc_ops);
1005}
1006module_init(netem_module_init)
1007module_exit(netem_module_exit)
1008MODULE_LICENSE("GPL");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * net/sched/sch_netem.c	Network emulator
   4 *
   5 *  		Many of the algorithms and ideas for this came from
   6 *		NIST Net which is not copyrighted.
   7 *
   8 * Authors:	Stephen Hemminger <shemminger@osdl.org>
   9 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  10 */
  11
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/errno.h>
  18#include <linux/skbuff.h>
  19#include <linux/vmalloc.h>
  20#include <linux/rtnetlink.h>
  21#include <linux/reciprocal_div.h>
  22#include <linux/rbtree.h>
  23
  24#include <net/netlink.h>
  25#include <net/pkt_sched.h>
  26#include <net/inet_ecn.h>
  27
  28#define VERSION "1.3"
  29
  30/*	Network Emulation Queuing algorithm.
  31	====================================
  32
  33	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
   34		 Network Emulation Tool"
  35		 [2] Luigi Rizzo, DummyNet for FreeBSD
  36
  37	 ----------------------------------------------------------------
  38
  39	 This started out as a simple way to delay outgoing packets to
  40	 test TCP but has grown to include most of the functionality
  41	 of a full blown network emulator like NISTnet. It can delay
  42	 packets and add random jitter (and correlation). The random
  43	 distribution can be loaded from a table as well to provide
  44	 normal, Pareto, or experimental curves. Packet loss,
  45	 duplication, and reordering can also be emulated.
  46
  47	 This qdisc does not do classification that can be handled in
  48	 layering other disciplines.  It does not need to do bandwidth
  49	 control either since that can be handled by using token
  50	 bucket or other rate control.
  51
  52     Correlated Loss Generator models
  53
  54	Added generation of correlated loss according to the
  55	"Gilbert-Elliot" model, a 4-state markov model.
  56
  57	References:
  58	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  59	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  60	and intuitive loss model for packet networks and its implementation
  61	in the Netem module in the Linux kernel", available in [1]
  62
  63	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  64		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  65*/
  66
  67struct disttable {
  68	u32  size;
  69	s16 table[];
  70};
  71
  72struct netem_sched_data {
  73	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
  74	struct rb_root t_root;
  75
  76	/* a linear queue; reduces rbtree rebalancing when jitter is low */
  77	struct sk_buff	*t_head;
  78	struct sk_buff	*t_tail;
  79
  80	/* optional qdisc for classful handling (NULL at netem init) */
  81	struct Qdisc	*qdisc;
  82
  83	struct qdisc_watchdog watchdog;
  84
  85	s64 latency;
  86	s64 jitter;
  87
  88	u32 loss;
  89	u32 ecn;
  90	u32 limit;
  91	u32 counter;
  92	u32 gap;
  93	u32 duplicate;
  94	u32 reorder;
  95	u32 corrupt;
  96	u64 rate;
  97	s32 packet_overhead;
  98	u32 cell_size;
  99	struct reciprocal_value cell_size_reciprocal;
 100	s32 cell_overhead;
 101
 102	struct crndstate {
 103		u32 last;
 104		u32 rho;
 105	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 106
 107	struct disttable *delay_dist;
 108
 109	enum  {
 110		CLG_RANDOM,
 111		CLG_4_STATES,
 112		CLG_GILB_ELL,
 113	} loss_model;
 114
 115	enum {
 116		TX_IN_GAP_PERIOD = 1,
 117		TX_IN_BURST_PERIOD,
 118		LOST_IN_GAP_PERIOD,
 119		LOST_IN_BURST_PERIOD,
 120	} _4_state_model;
 121
 122	enum {
 123		GOOD_STATE = 1,
 124		BAD_STATE,
 125	} GE_state_model;
 126
 127	/* Correlated Loss Generation models */
 128	struct clgstate {
 129		/* state of the Markov chain */
 130		u8 state;
 131
 132		/* 4-states and Gilbert-Elliot models */
 133		u32 a1;	/* p13 for 4-states or p for GE */
 134		u32 a2;	/* p31 for 4-states or r for GE */
 135		u32 a3;	/* p32 for 4-states or h for GE */
 136		u32 a4;	/* p14 for 4-states or 1-k for GE */
 137		u32 a5; /* p23 used only in 4-states */
 138	} clg;
 139
 140	struct tc_netem_slot slot_config;
 141	struct slotstate {
 142		u64 slot_next;
 143		s32 packets_left;
 144		s32 bytes_left;
 145	} slot;
 146
 147	struct disttable *slot_dist;
 148};
 149
 150/* Time stamp put into socket buffer control block
 151 * Only valid when skbs are in our internal t(ime)fifo queue.
 152 *
 153 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
 154 * and skb->next & skb->prev are scratch space for a qdisc,
 155 * we save skb->tstamp value in skb->cb[] before destroying it.
 156 */
 157struct netem_skb_cb {
 158	u64	        time_to_send;
 159};
 160
 161static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 162{
 163	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
 164	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 165	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 166}
 167
 168/* init_crandom - initialize correlated random number generator
 169 * Use entropy source for initial seed.
 170 */
 171static void init_crandom(struct crndstate *state, unsigned long rho)
 172{
 173	state->rho = rho;
 174	state->last = prandom_u32();
 175}
 176
 177/* get_crandom - correlated random number generator
 178 * Next number depends on last value.
 179 * rho is scaled to avoid floating point.
 180 */
 181static u32 get_crandom(struct crndstate *state)
 182{
 183	u64 value, rho;
 184	unsigned long answer;
 185
 186	if (!state || state->rho == 0)	/* no correlation */
 187		return prandom_u32();
 188
 189	value = prandom_u32();
 190	rho = (u64)state->rho + 1;
 191	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 192	state->last = answer;
 193	return answer;
 194}
 195
 196/* loss_4state - 4-state model loss generator
 197 * Generates losses according to the 4-state Markov chain adopted in
 198 * the GI (General and Intuitive) loss model.
 199 */
 200static bool loss_4state(struct netem_sched_data *q)
 201{
 202	struct clgstate *clg = &q->clg;
 203	u32 rnd = prandom_u32();
 204
 205	/*
 206	 * Makes a comparison between rnd and the transition
 207	 * probabilities outgoing from the current state, then decides the
 208	 * next state and if the next packet has to be transmitted or lost.
 209	 * The four states correspond to:
 210	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
 211	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
 212	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
  213	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
 214	 */
 215	switch (clg->state) {
 216	case TX_IN_GAP_PERIOD:
 217		if (rnd < clg->a4) {
 218			clg->state = LOST_IN_BURST_PERIOD;
 219			return true;
 220		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 221			clg->state = LOST_IN_GAP_PERIOD;
 222			return true;
 223		} else if (clg->a1 + clg->a4 < rnd) {
 224			clg->state = TX_IN_GAP_PERIOD;
 225		}
 226
 227		break;
 228	case TX_IN_BURST_PERIOD:
 229		if (rnd < clg->a5) {
 230			clg->state = LOST_IN_GAP_PERIOD;
 231			return true;
 232		} else {
 233			clg->state = TX_IN_BURST_PERIOD;
 234		}
 235
 236		break;
 237	case LOST_IN_GAP_PERIOD:
 238		if (rnd < clg->a3)
 239			clg->state = TX_IN_BURST_PERIOD;
 240		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 241			clg->state = TX_IN_GAP_PERIOD;
 242		} else if (clg->a2 + clg->a3 < rnd) {
 243			clg->state = LOST_IN_GAP_PERIOD;
 244			return true;
 245		}
 246		break;
 247	case LOST_IN_BURST_PERIOD:
 248		clg->state = TX_IN_GAP_PERIOD;
 249		break;
 250	}
 251
 252	return false;
 253}
 254
 255/* loss_gilb_ell - Gilbert-Elliot model loss generator
 256 * Generates losses according to the Gilbert-Elliot loss model or
 257 * its special cases  (Gilbert or Simple Gilbert)
 258 *
 259 * Makes a comparison between random number and the transition
 260 * probabilities outgoing from the current state, then decides the
 261 * next state. A second random number is extracted and the comparison
 262 * with the loss probability of the current state decides if the next
 263 * packet will be transmitted or lost.
 264 */
 265static bool loss_gilb_ell(struct netem_sched_data *q)
 266{
 267	struct clgstate *clg = &q->clg;
 268
 269	switch (clg->state) {
 270	case GOOD_STATE:
 271		if (prandom_u32() < clg->a1)
 272			clg->state = BAD_STATE;
 273		if (prandom_u32() < clg->a4)
 274			return true;
 275		break;
 276	case BAD_STATE:
 277		if (prandom_u32() < clg->a2)
 278			clg->state = GOOD_STATE;
 279		if (prandom_u32() > clg->a3)
 280			return true;
 281	}
 282
 283	return false;
 284}
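
Mapping back to the a1..a4 comments in struct clgstate: a1 = p and a2 = r are the good-to-bad and bad-to-good transition probabilities, a3 = h is the probability of transmitting while in the bad state (so the > clg->a3 test loses with probability 1-h), and a4 = 1-k is the loss probability in the good state; all four are again u32 fractions of 2^32, as in loss_4state().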
 285
 286static bool loss_event(struct netem_sched_data *q)
 287{
 288	switch (q->loss_model) {
 289	case CLG_RANDOM:
 290		/* Random packet drop 0 => none, ~0 => all */
 291		return q->loss && q->loss >= get_crandom(&q->loss_cor);
 292
 293	case CLG_4_STATES:
 294		/* 4state loss model algorithm (used also for GI model)
 295		* Extracts a value from the markov 4 state loss generator,
 296		* if it is 1 drops a packet and if needed writes the event in
 297		* the kernel logs
 298		*/
 299		return loss_4state(q);
 300
 301	case CLG_GILB_ELL:
 302		/* Gilbert-Elliot loss model algorithm
 303		* Extracts a value from the Gilbert-Elliot loss generator,
 304		* if it is 1 drops a packet and if needed writes the event in
 305		* the kernel logs
 306		*/
 307		return loss_gilb_ell(q);
 308	}
 309
 310	return false;	/* not reached */
 311}
 312
 313
 314/* tabledist - return a pseudo-randomly distributed value with mean mu and
 315 * std deviation sigma.  Uses table lookup to approximate the desired
 316 * distribution, and a uniformly-distributed pseudo-random source.
 317 */
 318static s64 tabledist(s64 mu, s32 sigma,
 319		     struct crndstate *state,
 320		     const struct disttable *dist)
 321{
 322	s64 x;
 323	long t;
 324	u32 rnd;
 325
 326	if (sigma == 0)
 327		return mu;
 328
 329	rnd = get_crandom(state);
 330
 331	/* default uniform distribution */
 332	if (dist == NULL)
 333		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
 334
 335	t = dist->table[rnd % dist->size];
 336	x = (sigma % NETEM_DIST_SCALE) * t;
 337	if (x >= 0)
 338		x += NETEM_DIST_SCALE/2;
 339	else
 340		x -= NETEM_DIST_SCALE/2;
 341
 342	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 343}
 344
 345static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
 346{
 347	len += q->packet_overhead;
 348
 349	if (q->cell_size) {
 350		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 351
 352		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 353			cells++;
 354		len = cells * (q->cell_size + q->cell_overhead);
 355	}
 356
 357	return div64_u64(len * NSEC_PER_SEC, q->rate);
 358}
 359
 360static void tfifo_reset(struct Qdisc *sch)
 361{
 362	struct netem_sched_data *q = qdisc_priv(sch);
 363	struct rb_node *p = rb_first(&q->t_root);
 364
 365	while (p) {
 366		struct sk_buff *skb = rb_to_skb(p);
 367
 368		p = rb_next(p);
 369		rb_erase(&skb->rbnode, &q->t_root);
 370		rtnl_kfree_skbs(skb, skb);
 371	}
 372
 373	rtnl_kfree_skbs(q->t_head, q->t_tail);
 374	q->t_head = NULL;
 375	q->t_tail = NULL;
 376}
 377
 378static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 379{
 380	struct netem_sched_data *q = qdisc_priv(sch);
 381	u64 tnext = netem_skb_cb(nskb)->time_to_send;
 382
 383	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
 384		if (q->t_tail)
 385			q->t_tail->next = nskb;
 386		else
 387			q->t_head = nskb;
 388		q->t_tail = nskb;
 389	} else {
 390		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 391
 392		while (*p) {
 393			struct sk_buff *skb;
 394
 395			parent = *p;
 396			skb = rb_to_skb(parent);
 397			if (tnext >= netem_skb_cb(skb)->time_to_send)
 398				p = &parent->rb_right;
 399			else
 400				p = &parent->rb_left;
 401		}
 402		rb_link_node(&nskb->rbnode, parent, p);
 403		rb_insert_color(&nskb->rbnode, &q->t_root);
 404	}
 405	sch->q.qlen++;
 406}
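
Unlike the v3.5.6 list walk, this version keeps two structures: the t_head/t_tail linear queue absorbs in-order arrivals (the common case when jitter is small relative to packet spacing), and only packets that must be inserted out of order go into the rbtree, which keeps rebalancing off the fast path, as the struct comment above describes.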
 407
 408/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 409 * when we statistically choose to corrupt one, we instead segment it, returning
 410 * the first packet to be corrupted, and re-enqueue the remaining frames
 411 */
 412static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
 413				     struct sk_buff **to_free)
 414{
 415	struct sk_buff *segs;
 416	netdev_features_t features = netif_skb_features(skb);
 417
 418	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 419
 420	if (IS_ERR_OR_NULL(segs)) {
 421		qdisc_drop(skb, sch, to_free);
 422		return NULL;
 423	}
 424	consume_skb(skb);
 425	return segs;
 426}
 427
 428/*
 429 * Insert one skb into qdisc.
 430 * Note: parent depends on return value to account for queue length.
 431 * 	NET_XMIT_DROP: queue length didn't change.
 432 *      NET_XMIT_SUCCESS: one skb was queued.
 433 */
 434static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 435			 struct sk_buff **to_free)
 436{
 437	struct netem_sched_data *q = qdisc_priv(sch);
 438	/* We don't fill cb now as skb_unshare() may invalidate it */
 439	struct netem_skb_cb *cb;
 440	struct sk_buff *skb2;
 441	struct sk_buff *segs = NULL;
 442	unsigned int prev_len = qdisc_pkt_len(skb);
 443	int count = 1;
 444	int rc = NET_XMIT_SUCCESS;
 445	int rc_drop = NET_XMIT_DROP;
 446
 447	/* Do not fool qdisc_drop_all() */
 448	skb->prev = NULL;
 449
 450	/* Random duplication */
 451	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 452		++count;
 453
 454	/* Drop packet? */
 455	if (loss_event(q)) {
 456		if (q->ecn && INET_ECN_set_ce(skb))
 457			qdisc_qstats_drop(sch); /* mark packet */
 458		else
 459			--count;
 460	}
 461	if (count == 0) {
 462		qdisc_qstats_drop(sch);
 463		__qdisc_drop(skb, to_free);
 464		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 465	}
 466
 467	/* If a delay is expected, orphan the skb. (orphaning usually takes
 468	 * place at TX completion time, so _before_ the link transit delay)
 469	 */
 470	if (q->latency || q->jitter || q->rate)
 471		skb_orphan_partial(skb);
 472
 473	/*
 474	 * If we need to duplicate packet, then re-insert at top of the
 475	 * qdisc tree, since parent queuer expects that only one
 476	 * skb will be queued.
 477	 */
 478	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 479		struct Qdisc *rootq = qdisc_root_bh(sch);
 480		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 481
 482		q->duplicate = 0;
 483		rootq->enqueue(skb2, rootq, to_free);
 484		q->duplicate = dupsave;
 485		rc_drop = NET_XMIT_SUCCESS;
 486	}
 487
 488	/*
 489	 * Randomized packet corruption.
 490	 * Make copy if needed since we are modifying
 491	 * If packet is going to be hardware checksummed, then
 492	 * do it now in software before we mangle it.
 493	 */
 494	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 495		if (skb_is_gso(skb)) {
 496			skb = netem_segment(skb, sch, to_free);
 497			if (!skb)
 498				return rc_drop;
 499			segs = skb->next;
 500			skb_mark_not_on_list(skb);
 501			qdisc_skb_cb(skb)->pkt_len = skb->len;
 502		}
 503
 504		skb = skb_unshare(skb, GFP_ATOMIC);
 505		if (unlikely(!skb)) {
 506			qdisc_qstats_drop(sch);
 507			goto finish_segs;
 508		}
 509		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 510		    skb_checksum_help(skb)) {
 511			qdisc_drop(skb, sch, to_free);
 512			skb = NULL;
 513			goto finish_segs;
 514		}
 515
 516		skb->data[prandom_u32() % skb_headlen(skb)] ^=
 517			1<<(prandom_u32() % 8);
 518	}
 519
 520	if (unlikely(sch->q.qlen >= sch->limit)) {
 521		/* re-link segs, so that qdisc_drop_all() frees them all */
 522		skb->next = segs;
 523		qdisc_drop_all(skb, sch, to_free);
 524		return rc_drop;
 525	}
 526
 527	qdisc_qstats_backlog_inc(sch, skb);
 528
 529	cb = netem_skb_cb(skb);
 530	if (q->gap == 0 ||		/* not doing reordering */
 531	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 532	    q->reorder < get_crandom(&q->reorder_cor)) {
 533		u64 now;
 534		s64 delay;
 535
 536		delay = tabledist(q->latency, q->jitter,
 537				  &q->delay_cor, q->delay_dist);
 538
 539		now = ktime_get_ns();
 540
 541		if (q->rate) {
 542			struct netem_skb_cb *last = NULL;
 543
 544			if (sch->q.tail)
 545				last = netem_skb_cb(sch->q.tail);
 546			if (q->t_root.rb_node) {
 547				struct sk_buff *t_skb;
 548				struct netem_skb_cb *t_last;
 549
 550				t_skb = skb_rb_last(&q->t_root);
 551				t_last = netem_skb_cb(t_skb);
 552				if (!last ||
 553				    t_last->time_to_send > last->time_to_send)
 554					last = t_last;
 555			}
 556			if (q->t_tail) {
 557				struct netem_skb_cb *t_last =
 558					netem_skb_cb(q->t_tail);
 559
 560				if (!last ||
 561				    t_last->time_to_send > last->time_to_send)
 562					last = t_last;
 563			}
 564
 565			if (last) {
 566				/*
 567				 * Last packet in queue is reference point (now),
 568				 * calculate this time bonus and subtract
 569				 * from delay.
 570				 */
 571				delay -= last->time_to_send - now;
 572				delay = max_t(s64, 0, delay);
 573				now = last->time_to_send;
 574			}
 575
 576			delay += packet_time_ns(qdisc_pkt_len(skb), q);
 577		}
 578
 579		cb->time_to_send = now + delay;
 580		++q->counter;
 581		tfifo_enqueue(skb, sch);
 582	} else {
 583		/*
 584		 * Do re-ordering by putting one out of N packets at the front
 585		 * of the queue.
 586		 */
 587		cb->time_to_send = ktime_get_ns();
 588		q->counter = 0;
 589
 590		__qdisc_enqueue_head(skb, &sch->q);
 591		sch->qstats.requeues++;
 592	}
 593
 594finish_segs:
 595	if (segs) {
 596		unsigned int len, last_len;
 597		int nb;
 598
 599		len = skb ? skb->len : 0;
 600		nb = skb ? 1 : 0;
 601
 602		while (segs) {
 603			skb2 = segs->next;
 604			skb_mark_not_on_list(segs);
 605			qdisc_skb_cb(segs)->pkt_len = segs->len;
 606			last_len = segs->len;
 607			rc = qdisc_enqueue(segs, sch, to_free);
 608			if (rc != NET_XMIT_SUCCESS) {
 609				if (net_xmit_drop_count(rc))
 610					qdisc_qstats_drop(sch);
 611			} else {
 612				nb++;
 613				len += last_len;
 614			}
 615			segs = skb2;
 616		}
 617		/* Parent qdiscs accounted for 1 skb of size @prev_len */
 618		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
 619	} else if (!skb) {
 620		return NET_XMIT_DROP;
 621	}
 622	return NET_XMIT_SUCCESS;
 623}
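
The finish_segs accounting corrects the parent qdiscs' view: they charged this enqueue as one skb of prev_len bytes, but GSO segmentation may have turned it into nb packets totaling len bytes, so qdisc_tree_reduce_backlog() is called with negative deltas -(nb - 1) and -(len - prev_len) to add the difference back.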
 624
 625/* Delay the next round with a new future slot with a
 626 * correct number of bytes and packets.
 627 */
 628
 629static void get_slot_next(struct netem_sched_data *q, u64 now)
 630{
 631	s64 next_delay;
 632
 633	if (!q->slot_dist)
 634		next_delay = q->slot_config.min_delay +
 635				(prandom_u32() *
 636				 (q->slot_config.max_delay -
 637				  q->slot_config.min_delay) >> 32);
 638	else
 639		next_delay = tabledist(q->slot_config.dist_delay,
 640				       (s32)(q->slot_config.dist_jitter),
 641				       NULL, q->slot_dist);
 642
 643	q->slot.slot_next = now + next_delay;
 644	q->slot.packets_left = q->slot_config.max_packets;
 645	q->slot.bytes_left = q->slot_config.max_bytes;
 646}
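
Without a slot distribution table, the next slot boundary is drawn uniformly from [min_delay, max_delay] using the same 32-bit fixed-point scaling seen elsewhere in this file. A small stand-alone sketch of that draw, with rand() assumed as a stand-in for prandom_u32():

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* uniform pick in [min, max): min + U * (max - min) / 2^32 */
	static int64_t slot_delay(int64_t min, int64_t max)
	{
		uint32_t u = (uint32_t)rand();

		return min + (int64_t)(((uint64_t)u * (uint64_t)(max - min)) >> 32);
	}

	int main(void)
	{
		/* e.g. slot boundaries 1 to 5 ms apart, in nanoseconds */
		printf("%lld ns\n", (long long)slot_delay(1000000, 5000000));
		return 0;
	}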
 647
 648static struct sk_buff *netem_peek(struct netem_sched_data *q)
 649{
 650	struct sk_buff *skb = skb_rb_first(&q->t_root);
 651	u64 t1, t2;
 652
 653	if (!skb)
 654		return q->t_head;
 655	if (!q->t_head)
 656		return skb;
 657
 658	t1 = netem_skb_cb(skb)->time_to_send;
 659	t2 = netem_skb_cb(q->t_head)->time_to_send;
 660	if (t1 < t2)
 661		return skb;
 662	return q->t_head;
 663}
 664
 665static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
 666{
 667	if (skb == q->t_head) {
 668		q->t_head = skb->next;
 669		if (!q->t_head)
 670			q->t_tail = NULL;
 671	} else {
 672		rb_erase(&skb->rbnode, &q->t_root);
 673	}
 674}
 675
 676static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 677{
 678	struct netem_sched_data *q = qdisc_priv(sch);
 679	struct sk_buff *skb;
 680
 681tfifo_dequeue:
 682	skb = __qdisc_dequeue_head(&sch->q);
 683	if (skb) {
 684		qdisc_qstats_backlog_dec(sch, skb);
 685deliver:
 686		qdisc_bstats_update(sch, skb);
 687		return skb;
 688	}
 689	skb = netem_peek(q);
 690	if (skb) {
 691		u64 time_to_send;
 692		u64 now = ktime_get_ns();
 693
 694		/* if more time remaining? */
 695		time_to_send = netem_skb_cb(skb)->time_to_send;
 696		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
 697			get_slot_next(q, now);
 698
 699		if (time_to_send <= now && q->slot.slot_next <= now) {
 700			netem_erase_head(q, skb);
 701			sch->q.qlen--;
 702			qdisc_qstats_backlog_dec(sch, skb);
 703			skb->next = NULL;
 704			skb->prev = NULL;
 705			/* skb->dev shares skb->rbnode area,
 706			 * we need to restore its value.
 707			 */
 708			skb->dev = qdisc_dev(sch);
 709
 710			if (q->slot.slot_next) {
 711				q->slot.packets_left--;
 712				q->slot.bytes_left -= qdisc_pkt_len(skb);
 713				if (q->slot.packets_left <= 0 ||
 714				    q->slot.bytes_left <= 0)
 715					get_slot_next(q, now);
 716			}
 717
 718			if (q->qdisc) {
 719				unsigned int pkt_len = qdisc_pkt_len(skb);
 720				struct sk_buff *to_free = NULL;
 721				int err;
 722
 723				err = qdisc_enqueue(skb, q->qdisc, &to_free);
 724				kfree_skb_list(to_free);
 725				if (err != NET_XMIT_SUCCESS &&
 726				    net_xmit_drop_count(err)) {
 727					qdisc_qstats_drop(sch);
 728					qdisc_tree_reduce_backlog(sch, 1,
 729								  pkt_len);
 730				}
 731				goto tfifo_dequeue;
 732			}
 733			goto deliver;
 734		}
 735
 736		if (q->qdisc) {
 737			skb = q->qdisc->ops->dequeue(q->qdisc);
 738			if (skb)
 739				goto deliver;
 740		}
 741
 742		qdisc_watchdog_schedule_ns(&q->watchdog,
 743					   max(time_to_send,
 744					       q->slot.slot_next));
 745	}
 746
 747	if (q->qdisc) {
 748		skb = q->qdisc->ops->dequeue(q->qdisc);
 749		if (skb)
 750			goto deliver;
 751	}
 752	return NULL;
 753}
 754
 755static void netem_reset(struct Qdisc *sch)
 756{
 757	struct netem_sched_data *q = qdisc_priv(sch);
 758
 759	qdisc_reset_queue(sch);
 760	tfifo_reset(sch);
 761	if (q->qdisc)
 762		qdisc_reset(q->qdisc);
 763	qdisc_watchdog_cancel(&q->watchdog);
 764}
 765
 766static void dist_free(struct disttable *d)
 767{
 768	kvfree(d);
 769}
 770
 771/*
 772 * Distribution data is a variable size payload containing
 773 * signed 16 bit values.
 774 */
 775
 776static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
 777			  const struct nlattr *attr)
 778{
 779	size_t n = nla_len(attr)/sizeof(__s16);
 780	const __s16 *data = nla_data(attr);
 781	spinlock_t *root_lock;
 782	struct disttable *d;
 783	int i;
 784
 785	if (!n || n > NETEM_DIST_MAX)
 786		return -EINVAL;
 787
 788	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
 789	if (!d)
 790		return -ENOMEM;
 791
 792	d->size = n;
 793	for (i = 0; i < n; i++)
 794		d->table[i] = data[i];
 795
 796	root_lock = qdisc_root_sleeping_lock(sch);
 797
 798	spin_lock_bh(root_lock);
 799	swap(*tbl, d);
 800	spin_unlock_bh(root_lock);
 801
 802	dist_free(d);
 803	return 0;
 804}
 805
 806static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
 807{
 808	const struct tc_netem_slot *c = nla_data(attr);
 809
 810	q->slot_config = *c;
 811	if (q->slot_config.max_packets == 0)
 812		q->slot_config.max_packets = INT_MAX;
 813	if (q->slot_config.max_bytes == 0)
 814		q->slot_config.max_bytes = INT_MAX;
 815
 816	/* capping dist_jitter to the range acceptable by tabledist() */
 817	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
 818
 819	q->slot.packets_left = q->slot_config.max_packets;
 820	q->slot.bytes_left = q->slot_config.max_bytes;
 821	if (q->slot_config.min_delay | q->slot_config.max_delay |
 822	    q->slot_config.dist_jitter)
 823		q->slot.slot_next = ktime_get_ns();
 824	else
 825		q->slot.slot_next = 0;
 826}
 827
 828static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 829{
 830	const struct tc_netem_corr *c = nla_data(attr);
 831
 832	init_crandom(&q->delay_cor, c->delay_corr);
 833	init_crandom(&q->loss_cor, c->loss_corr);
 834	init_crandom(&q->dup_cor, c->dup_corr);
 835}
 836
 837static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
 838{
 839	const struct tc_netem_reorder *r = nla_data(attr);
 840
 841	q->reorder = r->probability;
 842	init_crandom(&q->reorder_cor, r->correlation);
 843}
 844
 845static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
 846{
 847	const struct tc_netem_corrupt *r = nla_data(attr);
 848
 849	q->corrupt = r->probability;
 850	init_crandom(&q->corrupt_cor, r->correlation);
 851}
 852
 853static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
 854{
 855	const struct tc_netem_rate *r = nla_data(attr);
 856
 857	q->rate = r->rate;
 858	q->packet_overhead = r->packet_overhead;
 859	q->cell_size = r->cell_size;
 860	q->cell_overhead = r->cell_overhead;
 861	if (q->cell_size)
 862		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 863	else
 864		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
 865}
 866
 867static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
 868{
 869	const struct nlattr *la;
 870	int rem;
 871
 872	nla_for_each_nested(la, attr, rem) {
 873		u16 type = nla_type(la);
 874
 875		switch (type) {
 876		case NETEM_LOSS_GI: {
 877			const struct tc_netem_gimodel *gi = nla_data(la);
 878
 879			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 880				pr_info("netem: incorrect gi model size\n");
 881				return -EINVAL;
 882			}
 883
 884			q->loss_model = CLG_4_STATES;
 885
 886			q->clg.state = TX_IN_GAP_PERIOD;
 887			q->clg.a1 = gi->p13;
 888			q->clg.a2 = gi->p31;
 889			q->clg.a3 = gi->p32;
 890			q->clg.a4 = gi->p14;
 891			q->clg.a5 = gi->p23;
 892			break;
 893		}
 894
 895		case NETEM_LOSS_GE: {
 896			const struct tc_netem_gemodel *ge = nla_data(la);
 897
 898			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 899				pr_info("netem: incorrect ge model size\n");
 900				return -EINVAL;
 901			}
 902
 903			q->loss_model = CLG_GILB_ELL;
 904			q->clg.state = GOOD_STATE;
 905			q->clg.a1 = ge->p;
 906			q->clg.a2 = ge->r;
 907			q->clg.a3 = ge->h;
 908			q->clg.a4 = ge->k1;
 909			break;
 910		}
 911
 912		default:
 913			pr_info("netem: unknown loss type %u\n", type);
 914			return -EINVAL;
 915		}
 916	}
 917
 918	return 0;
 919}
 920
 921static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 922	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 923	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 924	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 925	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 926	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 927	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 928	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 929	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
 930	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
 931	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
 932};
 933
 934static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 935		      const struct nla_policy *policy, int len)
 936{
 937	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 938
 939	if (nested_len < 0) {
 940		pr_info("netem: invalid attributes len %d\n", nested_len);
 941		return -EINVAL;
 942	}
 943
 944	if (nested_len >= nla_attr_size(0))
 945		return nla_parse_deprecated(tb, maxtype,
 946					    nla_data(nla) + NLA_ALIGN(len),
 947					    nested_len, policy, NULL);
 948
 949	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 950	return 0;
 951}
 952
 953/* Parse netlink message to set options */
 954static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 955			struct netlink_ext_ack *extack)
 956{
 957	struct netem_sched_data *q = qdisc_priv(sch);
 958	struct nlattr *tb[TCA_NETEM_MAX + 1];
 959	struct tc_netem_qopt *qopt;
 960	struct clgstate old_clg;
 961	int old_loss_model = CLG_RANDOM;
 962	int ret;
 963
 964	if (opt == NULL)
 965		return -EINVAL;
 966
 967	qopt = nla_data(opt);
 968	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 969	if (ret < 0)
 970		return ret;
 971
 972	/* backup q->clg and q->loss_model */
 973	old_clg = q->clg;
 974	old_loss_model = q->loss_model;
 975
 976	if (tb[TCA_NETEM_LOSS]) {
 977		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
 978		if (ret) {
 979			q->loss_model = old_loss_model;
 980			return ret;
 981		}
 982	} else {
 983		q->loss_model = CLG_RANDOM;
 984	}
 985
 986	if (tb[TCA_NETEM_DELAY_DIST]) {
 987		ret = get_dist_table(sch, &q->delay_dist,
 988				     tb[TCA_NETEM_DELAY_DIST]);
 989		if (ret)
 990			goto get_table_failure;
 991	}
 992
 993	if (tb[TCA_NETEM_SLOT_DIST]) {
 994		ret = get_dist_table(sch, &q->slot_dist,
 995				     tb[TCA_NETEM_SLOT_DIST]);
 996		if (ret)
 997			goto get_table_failure;
 998	}
 999
1000	sch->limit = qopt->limit;
1001
1002	q->latency = PSCHED_TICKS2NS(qopt->latency);
1003	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
1004	q->limit = qopt->limit;
1005	q->gap = qopt->gap;
1006	q->counter = 0;
1007	q->loss = qopt->loss;
1008	q->duplicate = qopt->duplicate;
1009
1010	/* For compatibility with earlier versions:
1011	 * if gap is set, assume 100% reorder probability.
1012	 */
1013	if (q->gap)
1014		q->reorder = ~0;
1015
1016	if (tb[TCA_NETEM_CORR])
1017		get_correlation(q, tb[TCA_NETEM_CORR]);
1018
1019	if (tb[TCA_NETEM_REORDER])
1020		get_reorder(q, tb[TCA_NETEM_REORDER]);
1021
1022	if (tb[TCA_NETEM_CORRUPT])
1023		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
1024
1025	if (tb[TCA_NETEM_RATE])
1026		get_rate(q, tb[TCA_NETEM_RATE]);
1027
1028	if (tb[TCA_NETEM_RATE64])
1029		q->rate = max_t(u64, q->rate,
1030				nla_get_u64(tb[TCA_NETEM_RATE64]));
1031
1032	if (tb[TCA_NETEM_LATENCY64])
1033		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
1034
1035	if (tb[TCA_NETEM_JITTER64])
1036		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
1037
1038	if (tb[TCA_NETEM_ECN])
1039		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
1040
1041	if (tb[TCA_NETEM_SLOT])
1042		get_slot(q, tb[TCA_NETEM_SLOT]);
1043
1044	/* capping jitter to the range acceptable by tabledist() */
1045	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
1046
1047	return ret;
1048
1049get_table_failure:
1050	/* Restore clg and loss_model in case
1051	 * q->clg and q->loss_model were modified
1052	 * in get_loss_clg().
1053	 */
1054	q->clg = old_clg;
1055	q->loss_model = old_loss_model;
1056	return ret;
1057}
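/* Illustrative iproute2 commands that exercise the attributes parsed
 * above (syntax may vary between tc versions):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50%
 *	tc qdisc change dev eth0 root netem loss gemodel 1% 10%
 */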
1058
1059static int netem_init(struct Qdisc *sch, struct nlattr *opt,
1060		      struct netlink_ext_ack *extack)
1061{
1062	struct netem_sched_data *q = qdisc_priv(sch);
1063	int ret;
1064
1065	qdisc_watchdog_init(&q->watchdog, sch);
1066
1067	if (!opt)
1068		return -EINVAL;
1069
1070	q->loss_model = CLG_RANDOM;
1071	ret = netem_change(sch, opt, extack);
1072	if (ret)
1073		pr_info("netem: change failed\n");
1074	return ret;
1075}
1076
1077static void netem_destroy(struct Qdisc *sch)
1078{
1079	struct netem_sched_data *q = qdisc_priv(sch);
1080
1081	qdisc_watchdog_cancel(&q->watchdog);
1082	if (q->qdisc)
1083		qdisc_put(q->qdisc);
1084	dist_free(q->delay_dist);
1085	dist_free(q->slot_dist);
1086}
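/* Teardown order matters: the watchdog hrtimer is cancelled before the
 * child qdisc and distribution tables are released, so no timer
 * callback can run against freed state.
 */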
1087
1088static int dump_loss_model(const struct netem_sched_data *q,
1089			   struct sk_buff *skb)
1090{
1091	struct nlattr *nest;
1092
1093	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
1094	if (nest == NULL)
1095		goto nla_put_failure;
1096
1097	switch (q->loss_model) {
1098	case CLG_RANDOM:
1099		/* legacy loss model */
1100		nla_nest_cancel(skb, nest);
1101		return 0;	/* no data */
1102
1103	case CLG_4_STATES: {
1104		struct tc_netem_gimodel gi = {
1105			.p13 = q->clg.a1,
1106			.p31 = q->clg.a2,
1107			.p32 = q->clg.a3,
1108			.p14 = q->clg.a4,
1109			.p23 = q->clg.a5,
1110		};
1111
1112		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
1113			goto nla_put_failure;
1114		break;
1115	}
1116	case CLG_GILB_ELL: {
1117		struct tc_netem_gemodel ge = {
1118			.p = q->clg.a1,
1119			.r = q->clg.a2,
1120			.h = q->clg.a3,
1121			.k1 = q->clg.a4,
1122		};
1123
1124		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
1125			goto nla_put_failure;
1126		break;
1127	}
1128	}
1129
1130	nla_nest_end(skb, nest);
1131	return 0;
1132
1133nla_put_failure:
1134	nla_nest_cancel(skb, nest);
1135	return -1;
1136}
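/* On any put failure above, the half-built nest is cancelled, trimming
 * the skb back to where the nest started so no truncated attribute is
 * emitted.
 */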
1137
1138static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
1139{
1140	const struct netem_sched_data *q = qdisc_priv(sch);
1141	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
1142	struct tc_netem_qopt qopt;
1143	struct tc_netem_corr cor;
1144	struct tc_netem_reorder reorder;
1145	struct tc_netem_corrupt corrupt;
1146	struct tc_netem_rate rate;
1147	struct tc_netem_slot slot;
1148
1149	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
1150			     UINT_MAX);
1151	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
1152			    UINT_MAX);
1153	qopt.limit = q->limit;
1154	qopt.loss = q->loss;
1155	qopt.gap = q->gap;
1156	qopt.duplicate = q->duplicate;
1157	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1158		goto nla_put_failure;
1159
1160	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
1161		goto nla_put_failure;
1162
1163	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
1164		goto nla_put_failure;
1165
1166	cor.delay_corr = q->delay_cor.rho;
1167	cor.loss_corr = q->loss_cor.rho;
1168	cor.dup_corr = q->dup_cor.rho;
1169	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1170		goto nla_put_failure;
1171
1172	reorder.probability = q->reorder;
1173	reorder.correlation = q->reorder_cor.rho;
1174	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1175		goto nla_put_failure;
1176
1177	corrupt.probability = q->corrupt;
1178	corrupt.correlation = q->corrupt_cor.rho;
1179	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1180		goto nla_put_failure;
1181
1182	if (q->rate >= (1ULL << 32)) {
1183		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
1184				      TCA_NETEM_PAD))
1185			goto nla_put_failure;
1186		rate.rate = ~0U;
1187	} else {
1188		rate.rate = q->rate;
1189	}
1190	rate.packet_overhead = q->packet_overhead;
1191	rate.cell_size = q->cell_size;
1192	rate.cell_overhead = q->cell_overhead;
1193	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1194		goto nla_put_failure;
1195
1196	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1197		goto nla_put_failure;
1198
1199	if (dump_loss_model(q, skb) != 0)
1200		goto nla_put_failure;
1201
1202	if (q->slot_config.min_delay | q->slot_config.max_delay |
1203	    q->slot_config.dist_jitter) {
1204		slot = q->slot_config;
1205		if (slot.max_packets == INT_MAX)
1206			slot.max_packets = 0;
1207		if (slot.max_bytes == INT_MAX)
1208			slot.max_bytes = 0;
1209		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
1210			goto nla_put_failure;
1211	}
1212
1213	return nla_nest_end(skb, nla);
1214
1215nla_put_failure:
1216	nlmsg_trim(skb, nla);
1217	return -1;
1218}
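/* The dump mirrors parse_attr(): tc_netem_qopt goes out as a flat
 * TCA_OPTIONS attribute, and the puts that follow are folded into its
 * payload by the final nla_nest_end(skb, nla), nla having been
 * captured at the tail pointer before the first put.
 */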
1219
1220static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
1221			  struct sk_buff *skb, struct tcmsg *tcm)
1222{
1223	struct netem_sched_data *q = qdisc_priv(sch);
1224
1225	if (cl != 1 || !q->qdisc) 	/* only one class */
1226		return -ENOENT;
1227
1228	tcm->tcm_handle |= TC_H_MIN(1);
1229	tcm->tcm_info = q->qdisc->handle;
1230
1231	return 0;
1232}
1233
1234static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1235		     struct Qdisc **old, struct netlink_ext_ack *extack)
1236{
1237	struct netem_sched_data *q = qdisc_priv(sch);
1238
1239	*old = qdisc_replace(sch, new, &q->qdisc);
1240	return 0;
1241}
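/* qdisc_replace() swaps in the new child under sch_tree_lock() and
 * hands back the old qdisc for the caller to release.
 */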
1242
1243static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
1244{
1245	struct netem_sched_data *q = qdisc_priv(sch);
1246	return q->qdisc;
1247}
1248
1249static unsigned long netem_find(struct Qdisc *sch, u32 classid)
1250{
1251	return 1;
1252}
1253
1254static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
1255{
1256	if (!walker->stop) {
1257		if (walker->count >= walker->skip)
1258			if (walker->fn(sch, 1, walker) < 0) {
1259				walker->stop = 1;
1260				return;
1261			}
1262		walker->count++;
1263	}
1264}
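/* Only one pseudo-class (minor 1) exists, so the walk reports at most
 * a single class.
 */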
1265
1266static const struct Qdisc_class_ops netem_class_ops = {
1267	.graft		=	netem_graft,
1268	.leaf		=	netem_leaf,
1269	.find		=	netem_find,
1270	.walk		=	netem_walk,
1271	.dump		=	netem_dump_class,
1272};
1273
1274static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1275	.id		=	"netem",
1276	.cl_ops		=	&netem_class_ops,
1277	.priv_size	=	sizeof(struct netem_sched_data),
1278	.enqueue	=	netem_enqueue,
1279	.dequeue	=	netem_dequeue,
1280	.peek		=	qdisc_peek_dequeued,
1281	.init		=	netem_init,
1282	.reset		=	netem_reset,
1283	.destroy	=	netem_destroy,
1284	.change		=	netem_change,
1285	.dump		=	netem_dump,
1286	.owner		=	THIS_MODULE,
1287};
1288
1289
1290static int __init netem_module_init(void)
1291{
1292	pr_info("netem: version " VERSION "\n");
1293	return register_qdisc(&netem_qdisc_ops);
1294}
1295static void __exit netem_module_exit(void)
1296{
1297	unregister_qdisc(&netem_qdisc_ops);
1298}
1299module_init(netem_module_init)
1300module_exit(netem_module_exit)
1301MODULE_LICENSE("GPL");