v3.5.6
   1/*
   2 * net/sched/sch_netem.c	Network emulator
   3 *
   4 * 		This program is free software; you can redistribute it and/or
   5 * 		modify it under the terms of the GNU General Public License
   6 * 		as published by the Free Software Foundation; either version
   7 * 		2 of the License.
   8 *
   9 *  		Many of the algorithms and ideas for this came from
  10 *		NIST Net which is not copyrighted.
  11 *
  12 * Authors:	Stephen Hemminger <shemminger@osdl.org>
  13 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/module.h>
  18#include <linux/slab.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/rtnetlink.h>
  25#include <linux/reciprocal_div.h>
  26
  27#include <net/netlink.h>
  28#include <net/pkt_sched.h>
  29#include <net/inet_ecn.h>
  30
  31#define VERSION "1.3"
  32
  33/*	Network Emulation Queuing algorithm.
  34	====================================
  35
  36	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
  37		 Network Emulation Tool
  38		 [2] Luigi Rizzo, DummyNet for FreeBSD
  39
  40	 ----------------------------------------------------------------
  41
  42	 This started out as a simple way to delay outgoing packets to
  43	 test TCP but has grown to include most of the functionality
  44	 of a full blown network emulator like NISTnet. It can delay
  45	 packets and add random jitter (and correlation). The random
  46	 distribution can be loaded from a table as well to provide
  47	 normal, Pareto, or experimental curves. Packet loss,
  48	 duplication, and reordering can also be emulated.
  49
  50	 This qdisc does not do classification that can be handled in
  51	 layering other disciplines.  It does not need to do bandwidth
  52	 control either since that can be handled by using token
  53	 bucket or other rate control.
  54
  55     Correlated Loss Generator models
  56
  57	Added generation of correlated loss according to the
  58	"Gilbert-Elliot" model, a 4-state markov model.
  59
  60	References:
  61	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  62	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  63	and intuitive loss model for packet networks and its implementation
  64	in the Netem module in the Linux kernel", available in [1]
  65
  66	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  67		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  68*/
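/* Editor's note (not part of the kernel source): this qdisc is normally
 * driven from userspace through iproute2's tc(8).  A few illustrative
 * invocations, assuming a hypothetical interface eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *	tc qdisc change dev eth0 root netem duplicate 1% corrupt 0.1%
 *
 * The first command adds 100ms of delay with +/-10ms jitter, each delay
 * sample 25% correlated with the previous one; these map onto q->latency,
 * q->jitter and the delay_cor state in netem_sched_data below.
 */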
  69
  70struct netem_sched_data {
  71	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */
  72
  73	/* optional qdisc for classful handling (NULL at netem init) */
  74	struct Qdisc	*qdisc;
  75
  76	struct qdisc_watchdog watchdog;
  77
  78	psched_tdiff_t latency;
  79	psched_tdiff_t jitter;
  80
  81	u32 loss;
  82	u32 ecn;
  83	u32 limit;
  84	u32 counter;
  85	u32 gap;
  86	u32 duplicate;
  87	u32 reorder;
  88	u32 corrupt;
  89	u32 rate;
  90	s32 packet_overhead;
  91	u32 cell_size;
  92	u32 cell_size_reciprocal;
  93	s32 cell_overhead;
  94
  95	struct crndstate {
  96		u32 last;
  97		u32 rho;
  98	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
  99
 100	struct disttable {
 101		u32  size;
 102		s16 table[0];
 103	} *delay_dist;
 104
 105	enum  {
 106		CLG_RANDOM,
 107		CLG_4_STATES,
 108		CLG_GILB_ELL,
 109	} loss_model;
 110
 111	/* Correlated Loss Generation models */
 112	struct clgstate {
 113		/* state of the Markov chain */
 114		u8 state;
 115
 116		/* 4-states and Gilbert-Elliot models */
 117		u32 a1;	/* p13 for 4-states or p for GE */
 118		u32 a2;	/* p31 for 4-states or r for GE */
 119		u32 a3;	/* p32 for 4-states or h for GE */
 120		u32 a4;	/* p14 for 4-states or 1-k for GE */
 121		u32 a5; /* p23 used only in 4-states */
 122	} clg;
 123
 124};
 125
 126/* Time stamp put into socket buffer control block
 127 * Only valid when skbs are in our internal t(ime)fifo queue.
 128 */
 129struct netem_skb_cb {
 130	psched_time_t	time_to_send;
 131};
 132
 133static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 134{
 135	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 136	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 137}
 138
 139/* init_crandom - initialize correlated random number generator
 140 * Use entropy source for initial seed.
 141 */
 142static void init_crandom(struct crndstate *state, unsigned long rho)
 143{
 144	state->rho = rho;
 145	state->last = net_random();
 146}
 147
 148/* get_crandom - correlated random number generator
 149 * Next number depends on last value.
 150 * rho is scaled to avoid floating point.
 151 */
 152static u32 get_crandom(struct crndstate *state)
 153{
 154	u64 value, rho;
 155	unsigned long answer;
 156
 157	if (state->rho == 0)	/* no correlation */
 158		return net_random();
 159
 160	value = net_random();
 161	rho = (u64)state->rho + 1;
 162	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 163	state->last = answer;
 164	return answer;
 165}
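/* Editor's note (not part of the kernel source): get_crandom() is a
 * fixed-point, exponentially weighted blend.  Writing w = (rho + 1) / 2^32,
 * the update above is effectively
 *
 *	answer = value * (1 - w) + last * w
 *
 * evaluated entirely in integer arithmetic.  For example, rho = 0x80000000
 * (50% correlation) averages the fresh random value and the previous
 * output with equal weight, so successive samples drift rather than jump.
 */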
 166
 167/* loss_4state - 4-state model loss generator
 168 * Generates losses according to the 4-state Markov chain adopted in
 169 * the GI (General and Intuitive) loss model.
 170 */
 171static bool loss_4state(struct netem_sched_data *q)
 172{
 173	struct clgstate *clg = &q->clg;
 174	u32 rnd = net_random();
 175
 176	/*
 177	 * Makes a comparison between rnd and the transition
 178	 * probabilities outgoing from the current state, then decides the
 179	 * next state and if the next packet has to be transmitted or lost.
 180	 * The four states correspond to:
 181	 *   1 => successfully transmitted packets within a gap period
 182	 *   4 => isolated losses within a gap period
 183	 *   3 => lost packets within a burst period
 184	 *   2 => successfully transmitted packets within a burst period
 185	 */
 186	switch (clg->state) {
 187	case 1:
 188		if (rnd < clg->a4) {
 189			clg->state = 4;
 190			return true;
 191		} else if (clg->a4 < rnd && rnd < clg->a1) {
 192			clg->state = 3;
 193			return true;
 194		} else if (clg->a1 < rnd)
 195			clg->state = 1;
 196
 197		break;
 198	case 2:
 199		if (rnd < clg->a5) {
 200			clg->state = 3;
 201			return true;
 202		} else
 203			clg->state = 2;
 204
 205		break;
 206	case 3:
 207		if (rnd < clg->a3)
 208			clg->state = 2;
 209		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 210			clg->state = 1;
 211			return true;
 212		} else if (clg->a2 + clg->a3 < rnd) {
 213			clg->state = 3;
 214			return true;
 215		}
 216		break;
 217	case 4:
 218		clg->state = 1;
 219		break;
 220	}
 221
 222	return false;
 223}
 224
 225/* loss_gilb_ell - Gilbert-Elliot model loss generator
 226 * Generates losses according to the Gilbert-Elliot loss model or
 227 * its special cases  (Gilbert or Simple Gilbert)
 228 *
 229 * Makes a comparison between random number and the transition
 230 * probabilities outgoing from the current state, then decides the
 231 * next state. A second random number is extracted and the comparison
 232 * with the loss probability of the current state decides if the next
 233 * packet will be transmitted or lost.
 234 */
 235static bool loss_gilb_ell(struct netem_sched_data *q)
 236{
 237	struct clgstate *clg = &q->clg;
 238
 239	switch (clg->state) {
 240	case 1:
 241		if (net_random() < clg->a1)
 242			clg->state = 2;
 243		if (net_random() < clg->a4)
 244			return true;
 245	case 2:
 246		if (net_random() < clg->a2)
 247			clg->state = 1;
 248		if (clg->a3 > net_random())
 249			return true;
 250	}
 251
 252	return false;
 253}
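/* Editor's note (not part of the kernel source): in this version the
 * "case 1:" arm of loss_gilb_ell() has no break and falls through into
 * "case 2:", so a packet in the good state is also tested against the
 * bad-state probabilities.  The v4.10.11 listing later on this page adds
 * the missing break.
 */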
 254
 255static bool loss_event(struct netem_sched_data *q)
 256{
 257	switch (q->loss_model) {
 258	case CLG_RANDOM:
 259		/* Random packet drop 0 => none, ~0 => all */
 260		return q->loss && q->loss >= get_crandom(&q->loss_cor);
 261
 262	case CLG_4_STATES:
 263		/* 4state loss model algorithm (used also for GI model)
 264		* Extracts a value from the markov 4 state loss generator,
 265		* if it is 1 drops a packet and if needed writes the event in
 266		* the kernel logs
 267		*/
 268		return loss_4state(q);
 269
 270	case CLG_GILB_ELL:
 271		/* Gilbert-Elliot loss model algorithm
 272		* Extracts a value from the Gilbert-Elliot loss generator,
 273		* if it is 1 drops a packet and if needed writes the event in
 274		* the kernel logs
 275		*/
 276		return loss_gilb_ell(q);
 277	}
 278
 279	return false;	/* not reached */
 280}
 281
 282
 283/* tabledist - return a pseudo-randomly distributed value with mean mu and
 284 * std deviation sigma.  Uses table lookup to approximate the desired
 285 * distribution, and a uniformly-distributed pseudo-random source.
 286 */
 287static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
 288				struct crndstate *state,
 289				const struct disttable *dist)
 290{
 291	psched_tdiff_t x;
 292	long t;
 293	u32 rnd;
 294
 295	if (sigma == 0)
 296		return mu;
 297
 298	rnd = get_crandom(state);
 299
 300	/* default uniform distribution */
 301	if (dist == NULL)
 302		return (rnd % (2*sigma)) - sigma + mu;
 303
 304	t = dist->table[rnd % dist->size];
 305	x = (sigma % NETEM_DIST_SCALE) * t;
 306	if (x >= 0)
 307		x += NETEM_DIST_SCALE/2;
 308	else
 309		x -= NETEM_DIST_SCALE/2;
 310
 311	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 312}
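/* Editor's note (not part of the kernel source): splitting sigma into
 * (sigma % NETEM_DIST_SCALE) and (sigma / NETEM_DIST_SCALE) avoids
 * overflow; the result approximates mu + t * sigma / NETEM_DIST_SCALE.
 * With NETEM_DIST_SCALE = 8192, a table entry t = 8192 lands about one
 * standard deviation above the mean and t = -16384 about two below it.
 * The tables themselves are the normal/pareto/paretonormal distribution
 * files shipped with iproute2.
 */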
 313
 314static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 315{
 316	u64 ticks;
 317
 318	len += q->packet_overhead;
 319
 320	if (q->cell_size) {
 321		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 322
 323		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 324			cells++;
 325		len = cells * (q->cell_size + q->cell_overhead);
 326	}
 327
 328	ticks = (u64)len * NSEC_PER_SEC;
 329
 330	do_div(ticks, q->rate);
 331	return PSCHED_NS2TICKS(ticks);
 332}
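/* Editor's note (not part of the kernel source): q->rate is in bytes per
 * second, so at rate = 125000 (1 Mbit/s) a 1500 byte packet gives
 * ticks = 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns, i.e. a 12 ms
 * serialisation delay before conversion to scheduler ticks.
 */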
 333
 334static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 335{
 336	struct sk_buff_head *list = &sch->q;
 337	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
 338	struct sk_buff *skb = skb_peek_tail(list);
 339
 340	/* Optimize for add at tail */
 341	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
 342		return __skb_queue_tail(list, nskb);
 343
 344	skb_queue_reverse_walk(list, skb) {
 345		if (tnext >= netem_skb_cb(skb)->time_to_send)
 346			break;
 347	}
 348
 349	__skb_queue_after(list, skb, nskb);
 350}
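/* Editor's note (not part of the kernel source): this version keeps the
 * time-sorted fifo as a linked list walked backwards from the tail,
 * which is O(1) for the common in-order case but O(n) when jitter
 * reorders heavily.  The v4.10.11 listing later on this page replaces it
 * with an rbtree keyed on time_to_send, making every insert O(log n).
 */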
 351
 352/*
 353 * Insert one skb into qdisc.
 354 * Note: parent depends on return value to account for queue length.
 355 * 	NET_XMIT_DROP: queue length didn't change.
 356 *      NET_XMIT_SUCCESS: one skb was queued.
 357 */
 358static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 359{
 360	struct netem_sched_data *q = qdisc_priv(sch);
 361	/* We don't fill cb now as skb_unshare() may invalidate it */
 362	struct netem_skb_cb *cb;
 363	struct sk_buff *skb2;
 364	int count = 1;
 365
 366	/* Random duplication */
 367	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 368		++count;
 369
 370	/* Drop packet? */
 371	if (loss_event(q)) {
 372		if (q->ecn && INET_ECN_set_ce(skb))
 373			sch->qstats.drops++; /* mark packet */
 374		else
 375			--count;
 376	}
 377	if (count == 0) {
 378		sch->qstats.drops++;
 379		kfree_skb(skb);
 380		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 381	}
 382
 383	skb_orphan(skb);
 384
 385	/*
 386	 * If we need to duplicate packet, then re-insert at top of the
 387	 * qdisc tree, since parent queuer expects that only one
 388	 * skb will be queued.
 389	 */
 390	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 391		struct Qdisc *rootq = qdisc_root(sch);
 392		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 393		q->duplicate = 0;
 394
 395		qdisc_enqueue_root(skb2, rootq);
 396		q->duplicate = dupsave;
 397	}
 398
 399	/*
 400	 * Randomized packet corruption.
 401	 * Make copy if needed since we are modifying
 402	 * If packet is going to be hardware checksummed, then
 403	 * do it now in software before we mangle it.
 404	 */
 405	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 406		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 407		    (skb->ip_summed == CHECKSUM_PARTIAL &&
 408		     skb_checksum_help(skb)))
 409			return qdisc_drop(skb, sch);
 410
 411		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 412	}
 413
 414	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
 415		return qdisc_reshape_fail(skb, sch);
 416
 417	sch->qstats.backlog += qdisc_pkt_len(skb);
 418
 419	cb = netem_skb_cb(skb);
 420	if (q->gap == 0 ||		/* not doing reordering */
 421	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 422	    q->reorder < get_crandom(&q->reorder_cor)) {
 423		psched_time_t now;
 424		psched_tdiff_t delay;
 425
 426		delay = tabledist(q->latency, q->jitter,
 427				  &q->delay_cor, q->delay_dist);
 428
 429		now = psched_get_time();
 430
 431		if (q->rate) {
 432			struct sk_buff_head *list = &sch->q;
 433
 434			delay += packet_len_2_sched_time(skb->len, q);
 435
 436			if (!skb_queue_empty(list)) {
 437				/*
 438				 * Last packet in queue is reference point (now).
 439				 * First packet in queue is already in flight,
  440				 * calculate this time bonus and subtract
 441				 * from delay.
 442				 */
 443				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
 444				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
 445			}
 446		}
 447
 448		cb->time_to_send = now + delay;
 449		++q->counter;
 450		tfifo_enqueue(skb, sch);
 451	} else {
 452		/*
 453		 * Do re-ordering by putting one out of N packets at the front
 454		 * of the queue.
 455		 */
 456		cb->time_to_send = psched_get_time();
 457		q->counter = 0;
 458
 459		__skb_queue_head(&sch->q, skb);
 460		sch->qstats.requeues++;
 461	}
 462
 463	return NET_XMIT_SUCCESS;
 464}
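/* Editor's note (not part of the kernel source): duplication above
 * re-enqueues the clone at the root of the qdisc tree with q->duplicate
 * temporarily zeroed, so the copy traverses the full path (and may itself
 * be delayed, dropped or corrupted) without being duplicated again.
 */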
 465
 466static unsigned int netem_drop(struct Qdisc *sch)
 467{
 468	struct netem_sched_data *q = qdisc_priv(sch);
 469	unsigned int len;
 470
 471	len = qdisc_queue_drop(sch);
 472	if (!len && q->qdisc && q->qdisc->ops->drop)
 473	    len = q->qdisc->ops->drop(q->qdisc);
 474	if (len)
 475		sch->qstats.drops++;
 476
 477	return len;
 478}
 479
 480static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 481{
 482	struct netem_sched_data *q = qdisc_priv(sch);
 483	struct sk_buff *skb;
 484
 485	if (qdisc_is_throttled(sch))
 486		return NULL;
 487
 488tfifo_dequeue:
 489	skb = qdisc_peek_head(sch);
 490	if (skb) {
 491		const struct netem_skb_cb *cb = netem_skb_cb(skb);
 492
 493		/* if more time remaining? */
 494		if (cb->time_to_send <= psched_get_time()) {
 495			__skb_unlink(skb, &sch->q);
 496			sch->qstats.backlog -= qdisc_pkt_len(skb);
 497
 498#ifdef CONFIG_NET_CLS_ACT
 499			/*
 500			 * If it's at ingress let's pretend the delay is
 501			 * from the network (tstamp will be updated).
 502			 */
 503			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
 504				skb->tstamp.tv64 = 0;
 505#endif
 506
 507			if (q->qdisc) {
 508				int err = qdisc_enqueue(skb, q->qdisc);
 509
 510				if (unlikely(err != NET_XMIT_SUCCESS)) {
 511					if (net_xmit_drop_count(err)) {
 512						sch->qstats.drops++;
 513						qdisc_tree_decrease_qlen(sch, 1);
 514					}
 515				}
 516				goto tfifo_dequeue;
 517			}
 518deliver:
 519			qdisc_unthrottled(sch);
 520			qdisc_bstats_update(sch, skb);
 521			return skb;
 522		}
 523
 524		if (q->qdisc) {
 525			skb = q->qdisc->ops->dequeue(q->qdisc);
 526			if (skb)
 527				goto deliver;
 528		}
 529		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 530	}
 531
 532	if (q->qdisc) {
 533		skb = q->qdisc->ops->dequeue(q->qdisc);
 534		if (skb)
 535			goto deliver;
 536	}
 537	return NULL;
 538}
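/* Editor's note (not part of the kernel source): when the head packet's
 * time_to_send is still in the future, netem_dequeue() returns NULL and
 * arms qdisc_watchdog, whose hrtimer restarts the dequeue at exactly
 * that time instead of the stack busy-polling the qdisc.
 */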
 539
 540static void netem_reset(struct Qdisc *sch)
 541{
 542	struct netem_sched_data *q = qdisc_priv(sch);
 543
 544	qdisc_reset_queue(sch);
 545	if (q->qdisc)
 546		qdisc_reset(q->qdisc);
 547	qdisc_watchdog_cancel(&q->watchdog);
 548}
 549
 550static void dist_free(struct disttable *d)
 551{
 552	if (d) {
 553		if (is_vmalloc_addr(d))
 554			vfree(d);
 555		else
 556			kfree(d);
 557	}
 558}
 559
 560/*
 561 * Distribution data is a variable size payload containing
 562 * signed 16 bit values.
 563 */
 564static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 565{
 566	struct netem_sched_data *q = qdisc_priv(sch);
 567	size_t n = nla_len(attr)/sizeof(__s16);
 568	const __s16 *data = nla_data(attr);
 569	spinlock_t *root_lock;
 570	struct disttable *d;
 571	int i;
 572	size_t s;
 573
 574	if (n > NETEM_DIST_MAX)
 575		return -EINVAL;
 576
 577	s = sizeof(struct disttable) + n * sizeof(s16);
 578	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
 579	if (!d)
 580		d = vmalloc(s);
 581	if (!d)
 582		return -ENOMEM;
 583
 584	d->size = n;
 585	for (i = 0; i < n; i++)
 586		d->table[i] = data[i];
 587
 588	root_lock = qdisc_root_sleeping_lock(sch);
 589
 590	spin_lock_bh(root_lock);
 591	swap(q->delay_dist, d);
 592	spin_unlock_bh(root_lock);
 593
 594	dist_free(d);
 595	return 0;
 596}
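/* Editor's note (not part of the kernel source): the attribute payload is
 * at most NETEM_DIST_MAX signed 16-bit samples, pre-scaled so that a
 * value of NETEM_DIST_SCALE means one standard deviation.  A crude,
 * hypothetical uniform table covering roughly +/- 2 sigma could be built
 * in userspace as:
 *
 *	s16 table[4096];
 *	int i;
 *	for (i = 0; i < 4096; i++)
 *		table[i] = (i - 2048) * 8;
 *
 * iproute2's maketable tool generates the real distribution files.
 */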
 597
 598static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
 599{
 600	struct netem_sched_data *q = qdisc_priv(sch);
 601	const struct tc_netem_corr *c = nla_data(attr);
 602
 603	init_crandom(&q->delay_cor, c->delay_corr);
 604	init_crandom(&q->loss_cor, c->loss_corr);
 605	init_crandom(&q->dup_cor, c->dup_corr);
 606}
 607
 608static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
 609{
 610	struct netem_sched_data *q = qdisc_priv(sch);
 611	const struct tc_netem_reorder *r = nla_data(attr);
 612
 613	q->reorder = r->probability;
 614	init_crandom(&q->reorder_cor, r->correlation);
 615}
 616
 617static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
 618{
 619	struct netem_sched_data *q = qdisc_priv(sch);
 620	const struct tc_netem_corrupt *r = nla_data(attr);
 621
 622	q->corrupt = r->probability;
 623	init_crandom(&q->corrupt_cor, r->correlation);
 624}
 625
 626static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
 627{
 628	struct netem_sched_data *q = qdisc_priv(sch);
 629	const struct tc_netem_rate *r = nla_data(attr);
 630
 631	q->rate = r->rate;
 632	q->packet_overhead = r->packet_overhead;
 633	q->cell_size = r->cell_size;
 634	if (q->cell_size)
 635		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 636	q->cell_overhead = r->cell_overhead;
 637}
 638
 639static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 640{
 641	struct netem_sched_data *q = qdisc_priv(sch);
 642	const struct nlattr *la;
 643	int rem;
 644
 645	nla_for_each_nested(la, attr, rem) {
 646		u16 type = nla_type(la);
 647
 648		switch(type) {
 649		case NETEM_LOSS_GI: {
 650			const struct tc_netem_gimodel *gi = nla_data(la);
 651
 652			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 653				pr_info("netem: incorrect gi model size\n");
 654				return -EINVAL;
 655			}
 656
 657			q->loss_model = CLG_4_STATES;
 658
 659			q->clg.state = 1;
 660			q->clg.a1 = gi->p13;
 661			q->clg.a2 = gi->p31;
 662			q->clg.a3 = gi->p32;
 663			q->clg.a4 = gi->p14;
 664			q->clg.a5 = gi->p23;
 665			break;
 666		}
 667
 668		case NETEM_LOSS_GE: {
 669			const struct tc_netem_gemodel *ge = nla_data(la);
 670
 671			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 672				pr_info("netem: incorrect ge model size\n");
 673				return -EINVAL;
 674			}
 675
 676			q->loss_model = CLG_GILB_ELL;
 677			q->clg.state = 1;
 678			q->clg.a1 = ge->p;
 679			q->clg.a2 = ge->r;
 680			q->clg.a3 = ge->h;
 681			q->clg.a4 = ge->k1;
 682			break;
 683		}
 684
 685		default:
 686			pr_info("netem: unknown loss type %u\n", type);
 687			return -EINVAL;
 688		}
 689	}
 690
 691	return 0;
 692}
 693
 694static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 695	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 696	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 697	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 698	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 699	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 700	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 701};
 702
 703static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 704		      const struct nla_policy *policy, int len)
 705{
 706	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 707
 708	if (nested_len < 0) {
 709		pr_info("netem: invalid attributes len %d\n", nested_len);
 710		return -EINVAL;
 711	}
 712
 713	if (nested_len >= nla_attr_size(0))
 714		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
 715				 nested_len, policy);
 716
 717	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 718	return 0;
 719}
 720
 721/* Parse netlink message to set options */
 722static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 723{
 724	struct netem_sched_data *q = qdisc_priv(sch);
 725	struct nlattr *tb[TCA_NETEM_MAX + 1];
 726	struct tc_netem_qopt *qopt;
 727	int ret;
 728
 729	if (opt == NULL)
 730		return -EINVAL;
 731
 732	qopt = nla_data(opt);
 733	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 734	if (ret < 0)
 735		return ret;
 736
 737	sch->limit = qopt->limit;
 738
 739	q->latency = qopt->latency;
 740	q->jitter = qopt->jitter;
 741	q->limit = qopt->limit;
 742	q->gap = qopt->gap;
 743	q->counter = 0;
 744	q->loss = qopt->loss;
 745	q->duplicate = qopt->duplicate;
 746
 747	/* for compatibility with earlier versions.
 748	 * if gap is set, need to assume 100% probability
 749	 */
 750	if (q->gap)
 751		q->reorder = ~0;
 752
 753	if (tb[TCA_NETEM_CORR])
 754		get_correlation(sch, tb[TCA_NETEM_CORR]);
 755
 756	if (tb[TCA_NETEM_DELAY_DIST]) {
 757		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
 758		if (ret)
 759			return ret;
 760	}
 761
 762	if (tb[TCA_NETEM_REORDER])
 763		get_reorder(sch, tb[TCA_NETEM_REORDER]);
 764
 765	if (tb[TCA_NETEM_CORRUPT])
 766		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 767
 768	if (tb[TCA_NETEM_RATE])
 769		get_rate(sch, tb[TCA_NETEM_RATE]);
 770
 771	if (tb[TCA_NETEM_ECN])
 772		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 773
 774	q->loss_model = CLG_RANDOM;
 775	if (tb[TCA_NETEM_LOSS])
 776		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
 777
 778	return ret;
 779}
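/* Editor's note (not part of the kernel source): attributes already
 * applied above (limit, latency, correlation, ...) stick even when a
 * later one fails, so a rejected change can leave mixed settings.  The
 * v4.10.11 version later on this page reorders netem_change() and
 * snapshots clg/loss_model so that a failing TCA_NETEM_LOSS or delay
 * distribution table cannot leave the loss state half-updated.
 */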
 780
 781static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 782{
 783	struct netem_sched_data *q = qdisc_priv(sch);
 784	int ret;
 785
 786	if (!opt)
 787		return -EINVAL;
 788
 789	qdisc_watchdog_init(&q->watchdog, sch);
 790
 791	q->loss_model = CLG_RANDOM;
 792	ret = netem_change(sch, opt);
 793	if (ret)
 794		pr_info("netem: change failed\n");
 795	return ret;
 796}
 797
 798static void netem_destroy(struct Qdisc *sch)
 799{
 800	struct netem_sched_data *q = qdisc_priv(sch);
 801
 802	qdisc_watchdog_cancel(&q->watchdog);
 803	if (q->qdisc)
 804		qdisc_destroy(q->qdisc);
 805	dist_free(q->delay_dist);
 806}
 807
 808static int dump_loss_model(const struct netem_sched_data *q,
 809			   struct sk_buff *skb)
 810{
 811	struct nlattr *nest;
 812
 813	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
 814	if (nest == NULL)
 815		goto nla_put_failure;
 816
 817	switch (q->loss_model) {
 818	case CLG_RANDOM:
 819		/* legacy loss model */
 820		nla_nest_cancel(skb, nest);
 821		return 0;	/* no data */
 822
 823	case CLG_4_STATES: {
 824		struct tc_netem_gimodel gi = {
 825			.p13 = q->clg.a1,
 826			.p31 = q->clg.a2,
 827			.p32 = q->clg.a3,
 828			.p14 = q->clg.a4,
 829			.p23 = q->clg.a5,
 830		};
 831
 832		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
 833			goto nla_put_failure;
 834		break;
 835	}
 836	case CLG_GILB_ELL: {
 837		struct tc_netem_gemodel ge = {
 838			.p = q->clg.a1,
 839			.r = q->clg.a2,
 840			.h = q->clg.a3,
 841			.k1 = q->clg.a4,
 842		};
 843
 844		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
 845			goto nla_put_failure;
 846		break;
 847	}
 848	}
 849
 850	nla_nest_end(skb, nest);
 851	return 0;
 852
 853nla_put_failure:
 854	nla_nest_cancel(skb, nest);
 855	return -1;
 856}
 857
 858static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 859{
 860	const struct netem_sched_data *q = qdisc_priv(sch);
 861	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
 862	struct tc_netem_qopt qopt;
 863	struct tc_netem_corr cor;
 864	struct tc_netem_reorder reorder;
 865	struct tc_netem_corrupt corrupt;
 866	struct tc_netem_rate rate;
 867
 868	qopt.latency = q->latency;
 869	qopt.jitter = q->jitter;
 870	qopt.limit = q->limit;
 871	qopt.loss = q->loss;
 872	qopt.gap = q->gap;
 873	qopt.duplicate = q->duplicate;
 874	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
 875		goto nla_put_failure;
 876
 877	cor.delay_corr = q->delay_cor.rho;
 878	cor.loss_corr = q->loss_cor.rho;
 879	cor.dup_corr = q->dup_cor.rho;
 880	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
 881		goto nla_put_failure;
 882
 883	reorder.probability = q->reorder;
 884	reorder.correlation = q->reorder_cor.rho;
 885	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
 886		goto nla_put_failure;
 887
 888	corrupt.probability = q->corrupt;
 889	corrupt.correlation = q->corrupt_cor.rho;
 890	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
 891		goto nla_put_failure;
 892
 893	rate.rate = q->rate;
 894	rate.packet_overhead = q->packet_overhead;
 895	rate.cell_size = q->cell_size;
 896	rate.cell_overhead = q->cell_overhead;
 897	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
 898		goto nla_put_failure;
 899
 900	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
 901		goto nla_put_failure;
 902
 903	if (dump_loss_model(q, skb) != 0)
 904		goto nla_put_failure;
 905
 906	return nla_nest_end(skb, nla);
 907
 908nla_put_failure:
 909	nlmsg_trim(skb, nla);
 910	return -1;
 911}
 912
 913static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 914			  struct sk_buff *skb, struct tcmsg *tcm)
 915{
 916	struct netem_sched_data *q = qdisc_priv(sch);
 917
 918	if (cl != 1 || !q->qdisc) 	/* only one class */
 919		return -ENOENT;
 920
 921	tcm->tcm_handle |= TC_H_MIN(1);
 922	tcm->tcm_info = q->qdisc->handle;
 923
 924	return 0;
 925}
 926
 927static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 928		     struct Qdisc **old)
 929{
 930	struct netem_sched_data *q = qdisc_priv(sch);
 931
 932	sch_tree_lock(sch);
 933	*old = q->qdisc;
 934	q->qdisc = new;
 935	if (*old) {
 936		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 937		qdisc_reset(*old);
 938	}
 939	sch_tree_unlock(sch);
 940
 941	return 0;
 942}
 943
 944static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
 945{
 946	struct netem_sched_data *q = qdisc_priv(sch);
 947	return q->qdisc;
 948}
 949
 950static unsigned long netem_get(struct Qdisc *sch, u32 classid)
 951{
 952	return 1;
 953}
 954
 955static void netem_put(struct Qdisc *sch, unsigned long arg)
 956{
 957}
 958
 959static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 960{
 961	if (!walker->stop) {
 962		if (walker->count >= walker->skip)
 963			if (walker->fn(sch, 1, walker) < 0) {
 964				walker->stop = 1;
 965				return;
 966			}
 967		walker->count++;
 968	}
 969}
 970
 971static const struct Qdisc_class_ops netem_class_ops = {
 972	.graft		=	netem_graft,
 973	.leaf		=	netem_leaf,
 974	.get		=	netem_get,
 975	.put		=	netem_put,
 976	.walk		=	netem_walk,
 977	.dump		=	netem_dump_class,
 978};
 979
 980static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 981	.id		=	"netem",
 982	.cl_ops		=	&netem_class_ops,
 983	.priv_size	=	sizeof(struct netem_sched_data),
 984	.enqueue	=	netem_enqueue,
 985	.dequeue	=	netem_dequeue,
 986	.peek		=	qdisc_peek_dequeued,
 987	.drop		=	netem_drop,
 988	.init		=	netem_init,
 989	.reset		=	netem_reset,
 990	.destroy	=	netem_destroy,
 991	.change		=	netem_change,
 992	.dump		=	netem_dump,
 993	.owner		=	THIS_MODULE,
 994};
 995
 996
 997static int __init netem_module_init(void)
 998{
 999	pr_info("netem: version " VERSION "\n");
1000	return register_qdisc(&netem_qdisc_ops);
1001}
1002static void __exit netem_module_exit(void)
1003{
1004	unregister_qdisc(&netem_qdisc_ops);
1005}
1006module_init(netem_module_init)
1007module_exit(netem_module_exit)
1008MODULE_LICENSE("GPL");
v4.10.11
   1/*
   2 * net/sched/sch_netem.c	Network emulator
   3 *
   4 * 		This program is free software; you can redistribute it and/or
   5 * 		modify it under the terms of the GNU General Public License
   6 * 		as published by the Free Software Foundation; either version
   7 * 		2 of the License.
   8 *
   9 *  		Many of the algorithms and ideas for this came from
  10 *		NIST Net which is not copyrighted.
  11 *
  12 * Authors:	Stephen Hemminger <shemminger@osdl.org>
  13 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/module.h>
  18#include <linux/slab.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/rtnetlink.h>
  25#include <linux/reciprocal_div.h>
  26#include <linux/rbtree.h>
  27
  28#include <net/netlink.h>
  29#include <net/pkt_sched.h>
  30#include <net/inet_ecn.h>
  31
  32#define VERSION "1.3"
  33
  34/*	Network Emulation Queuing algorithm.
  35	====================================
  36
  37	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
  38		 Network Emulation Tool
  39		 [2] Luigi Rizzo, DummyNet for FreeBSD
  40
  41	 ----------------------------------------------------------------
  42
  43	 This started out as a simple way to delay outgoing packets to
  44	 test TCP but has grown to include most of the functionality
  45	 of a full blown network emulator like NISTnet. It can delay
  46	 packets and add random jitter (and correlation). The random
  47	 distribution can be loaded from a table as well to provide
  48	 normal, Pareto, or experimental curves. Packet loss,
  49	 duplication, and reordering can also be emulated.
  50
  51	 This qdisc does not do classification that can be handled in
  52	 layering other disciplines.  It does not need to do bandwidth
  53	 control either since that can be handled by using token
  54	 bucket or other rate control.
  55
  56     Correlated Loss Generator models
  57
  58	Added generation of correlated loss according to the
  59	"Gilbert-Elliot" model, a 4-state markov model.
  60
  61	References:
  62	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  63	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  64	and intuitive loss model for packet networks and its implementation
  65	in the Netem module in the Linux kernel", available in [1]
  66
  67	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  68		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  69*/
  70
  71struct netem_sched_data {
  72	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
  73	struct rb_root t_root;
  74
  75	/* optional qdisc for classful handling (NULL at netem init) */
  76	struct Qdisc	*qdisc;
  77
  78	struct qdisc_watchdog watchdog;
  79
  80	psched_tdiff_t latency;
  81	psched_tdiff_t jitter;
  82
  83	u32 loss;
  84	u32 ecn;
  85	u32 limit;
  86	u32 counter;
  87	u32 gap;
  88	u32 duplicate;
  89	u32 reorder;
  90	u32 corrupt;
  91	u64 rate;
  92	s32 packet_overhead;
  93	u32 cell_size;
  94	struct reciprocal_value cell_size_reciprocal;
  95	s32 cell_overhead;
  96
  97	struct crndstate {
  98		u32 last;
  99		u32 rho;
 100	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 101
 102	struct disttable {
 103		u32  size;
 104		s16 table[0];
 105	} *delay_dist;
 106
 107	enum  {
 108		CLG_RANDOM,
 109		CLG_4_STATES,
 110		CLG_GILB_ELL,
 111	} loss_model;
 112
 113	enum {
 114		TX_IN_GAP_PERIOD = 1,
 115		TX_IN_BURST_PERIOD,
 116		LOST_IN_GAP_PERIOD,
 117		LOST_IN_BURST_PERIOD,
 118	} _4_state_model;
 119
 120	enum {
 121		GOOD_STATE = 1,
 122		BAD_STATE,
 123	} GE_state_model;
 124
 125	/* Correlated Loss Generation models */
 126	struct clgstate {
 127		/* state of the Markov chain */
 128		u8 state;
 129
 130		/* 4-states and Gilbert-Elliot models */
 131		u32 a1;	/* p13 for 4-states or p for GE */
 132		u32 a2;	/* p31 for 4-states or r for GE */
 133		u32 a3;	/* p32 for 4-states or h for GE */
 134		u32 a4;	/* p14 for 4-states or 1-k for GE */
 135		u32 a5; /* p23 used only in 4-states */
 136	} clg;
 137
 138};
 139
 140/* Time stamp put into socket buffer control block
 141 * Only valid when skbs are in our internal t(ime)fifo queue.
 142 *
 143 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
 144 * and skb->next & skb->prev are scratch space for a qdisc,
 145 * we save skb->tstamp value in skb->cb[] before destroying it.
 146 */
 147struct netem_skb_cb {
 148	psched_time_t	time_to_send;
 149	ktime_t		tstamp_save;
 150};
 151
 152
 153static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
 154{
 155	return rb_entry(rb, struct sk_buff, rbnode);
 156}
 157
 158static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 159{
 160	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
 161	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 162	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 163}
 164
 165/* init_crandom - initialize correlated random number generator
 166 * Use entropy source for initial seed.
 167 */
 168static void init_crandom(struct crndstate *state, unsigned long rho)
 169{
 170	state->rho = rho;
 171	state->last = prandom_u32();
 172}
 173
 174/* get_crandom - correlated random number generator
 175 * Next number depends on last value.
 176 * rho is scaled to avoid floating point.
 177 */
 178static u32 get_crandom(struct crndstate *state)
 179{
 180	u64 value, rho;
 181	unsigned long answer;
 182
 183	if (state->rho == 0)	/* no correlation */
 184		return prandom_u32();
 185
 186	value = prandom_u32();
 187	rho = (u64)state->rho + 1;
 188	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 189	state->last = answer;
 190	return answer;
 191}
 192
 193/* loss_4state - 4-state model loss generator
 194 * Generates losses according to the 4-state Markov chain adopted in
 195 * the GI (General and Intuitive) loss model.
 196 */
 197static bool loss_4state(struct netem_sched_data *q)
 198{
 199	struct clgstate *clg = &q->clg;
 200	u32 rnd = prandom_u32();
 201
 202	/*
 203	 * Makes a comparison between rnd and the transition
 204	 * probabilities outgoing from the current state, then decides the
 205	 * next state and if the next packet has to be transmitted or lost.
 206	 * The four states correspond to:
 207	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
 208	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
 209	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
  210	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
 211	 */
 212	switch (clg->state) {
 213	case TX_IN_GAP_PERIOD:
 214		if (rnd < clg->a4) {
 215			clg->state = LOST_IN_BURST_PERIOD;
 216			return true;
 217		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 218			clg->state = LOST_IN_GAP_PERIOD;
 219			return true;
 220		} else if (clg->a1 + clg->a4 < rnd) {
 221			clg->state = TX_IN_GAP_PERIOD;
 222		}
 223
 224		break;
 225	case TX_IN_BURST_PERIOD:
 226		if (rnd < clg->a5) {
 227			clg->state = LOST_IN_GAP_PERIOD;
 228			return true;
 229		} else {
 230			clg->state = TX_IN_BURST_PERIOD;
 231		}
 232
 233		break;
 234	case LOST_IN_GAP_PERIOD:
 235		if (rnd < clg->a3)
 236			clg->state = TX_IN_BURST_PERIOD;
 237		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 238			clg->state = TX_IN_GAP_PERIOD;
 239		} else if (clg->a2 + clg->a3 < rnd) {
 240			clg->state = LOST_IN_GAP_PERIOD;
 241			return true;
 242		}
 243		break;
 244	case LOST_IN_BURST_PERIOD:
 245		clg->state = TX_IN_GAP_PERIOD;
 246		break;
 247	}
 248
 249	return false;
 250}
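/* Editor's note (not part of the kernel source): compared with the
 * v3.5.6 listing above, the TX_IN_GAP_PERIOD arm now tests rnd against
 * clg->a1 + clg->a4 rather than clg->a1 alone, so p13 keeps its intended
 * weight even when p14 is non-zero and the three branches partition the
 * probability space cleanly.
 */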
 251
 252/* loss_gilb_ell - Gilbert-Elliot model loss generator
 253 * Generates losses according to the Gilbert-Elliot loss model or
 254 * its special cases  (Gilbert or Simple Gilbert)
 255 *
 256 * Makes a comparison between random number and the transition
 257 * probabilities outgoing from the current state, then decides the
 258 * next state. A second random number is extracted and the comparison
 259 * with the loss probability of the current state decides if the next
 260 * packet will be transmitted or lost.
 261 */
 262static bool loss_gilb_ell(struct netem_sched_data *q)
 263{
 264	struct clgstate *clg = &q->clg;
 265
 266	switch (clg->state) {
 267	case GOOD_STATE:
 268		if (prandom_u32() < clg->a1)
 269			clg->state = BAD_STATE;
 270		if (prandom_u32() < clg->a4)
 271			return true;
 272		break;
 273	case BAD_STATE:
 274		if (prandom_u32() < clg->a2)
 275			clg->state = GOOD_STATE;
 276		if (prandom_u32() > clg->a3)
 277			return true;
 278	}
 279
 280	return false;
 281}
 282
 283static bool loss_event(struct netem_sched_data *q)
 284{
 285	switch (q->loss_model) {
 286	case CLG_RANDOM:
 287		/* Random packet drop 0 => none, ~0 => all */
 288		return q->loss && q->loss >= get_crandom(&q->loss_cor);
 289
 290	case CLG_4_STATES:
 291		/* 4state loss model algorithm (used also for GI model)
 292		* Extracts a value from the markov 4 state loss generator,
 293		* if it is 1 drops a packet and if needed writes the event in
 294		* the kernel logs
 295		*/
 296		return loss_4state(q);
 297
 298	case CLG_GILB_ELL:
 299		/* Gilbert-Elliot loss model algorithm
 300		* Extracts a value from the Gilbert-Elliot loss generator,
 301		* if it is 1 drops a packet and if needed writes the event in
 302		* the kernel logs
 303		*/
 304		return loss_gilb_ell(q);
 305	}
 306
 307	return false;	/* not reached */
 308}
 309
 310
 311/* tabledist - return a pseudo-randomly distributed value with mean mu and
 312 * std deviation sigma.  Uses table lookup to approximate the desired
 313 * distribution, and a uniformly-distributed pseudo-random source.
 314 */
 315static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
 316				struct crndstate *state,
 317				const struct disttable *dist)
 318{
 319	psched_tdiff_t x;
 320	long t;
 321	u32 rnd;
 322
 323	if (sigma == 0)
 324		return mu;
 325
 326	rnd = get_crandom(state);
 327
 328	/* default uniform distribution */
 329	if (dist == NULL)
 330		return (rnd % (2*sigma)) - sigma + mu;
 331
 332	t = dist->table[rnd % dist->size];
 333	x = (sigma % NETEM_DIST_SCALE) * t;
 334	if (x >= 0)
 335		x += NETEM_DIST_SCALE/2;
 336	else
 337		x -= NETEM_DIST_SCALE/2;
 338
 339	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 340}
 341
 342static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 343{
 344	u64 ticks;
 345
 346	len += q->packet_overhead;
 347
 348	if (q->cell_size) {
 349		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 350
 351		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 352			cells++;
 353		len = cells * (q->cell_size + q->cell_overhead);
 354	}
 355
 356	ticks = (u64)len * NSEC_PER_SEC;
 357
 358	do_div(ticks, q->rate);
 359	return PSCHED_NS2TICKS(ticks);
 360}
 361
 362static void tfifo_reset(struct Qdisc *sch)
 363{
 364	struct netem_sched_data *q = qdisc_priv(sch);
 365	struct rb_node *p;
 366
 367	while ((p = rb_first(&q->t_root))) {
 368		struct sk_buff *skb = netem_rb_to_skb(p);
 369
 370		rb_erase(p, &q->t_root);
 371		rtnl_kfree_skbs(skb, skb);
 372	}
 373}
 374
 375static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 376{
 377	struct netem_sched_data *q = qdisc_priv(sch);
 378	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
 379	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 380
 381	while (*p) {
 382		struct sk_buff *skb;
 383
 384		parent = *p;
 385		skb = netem_rb_to_skb(parent);
 386		if (tnext >= netem_skb_cb(skb)->time_to_send)
 387			p = &parent->rb_right;
 388		else
 389			p = &parent->rb_left;
 390	}
 391	rb_link_node(&nskb->rbnode, parent, p);
 392	rb_insert_color(&nskb->rbnode, &q->t_root);
 393	sch->q.qlen++;
 394}
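/* Editor's note (not part of the kernel source): keying the rbtree on
 * time_to_send and sending equal keys to rb_right keeps packets with
 * identical timestamps in FIFO order, so with zero jitter the tree
 * behaves like the old tail-insert list.
 */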
 395
 396/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 397 * when we statistically choose to corrupt one, we instead segment it, returning
 398 * the first packet to be corrupted, and re-enqueue the remaining frames
 399 */
 400static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
 401				     struct sk_buff **to_free)
 402{
 403	struct sk_buff *segs;
 404	netdev_features_t features = netif_skb_features(skb);
 405
 406	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 407
 408	if (IS_ERR_OR_NULL(segs)) {
 409		qdisc_drop(skb, sch, to_free);
 410		return NULL;
 411	}
 412	consume_skb(skb);
 413	return segs;
 414}
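/* Editor's note (not part of the kernel source): the corruption path
 * flips one bit inside skb_headlen(), which on a GSO super-packet could
 * only ever touch the head area; segmenting first lets the chosen bit
 * land in an ordinary-sized frame, while the remaining segments are
 * re-enqueued through netem_enqueue() and treated independently.
 */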
 415
 416static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
 417{
 418	skb->next = qh->head;
 419
 420	if (!qh->head)
 421		qh->tail = skb;
 422	qh->head = skb;
 423	qh->qlen++;
 424}
 425
 426/*
 427 * Insert one skb into qdisc.
 428 * Note: parent depends on return value to account for queue length.
 429 * 	NET_XMIT_DROP: queue length didn't change.
 430 *      NET_XMIT_SUCCESS: one skb was queued.
 431 */
 432static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 433			 struct sk_buff **to_free)
 434{
 435	struct netem_sched_data *q = qdisc_priv(sch);
 436	/* We don't fill cb now as skb_unshare() may invalidate it */
 437	struct netem_skb_cb *cb;
 438	struct sk_buff *skb2;
 439	struct sk_buff *segs = NULL;
 440	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
 441	int nb = 0;
 442	int count = 1;
 443	int rc = NET_XMIT_SUCCESS;
 444
 445	/* Random duplication */
 446	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 447		++count;
 448
 449	/* Drop packet? */
 450	if (loss_event(q)) {
 451		if (q->ecn && INET_ECN_set_ce(skb))
 452			qdisc_qstats_drop(sch); /* mark packet */
 453		else
 454			--count;
 455	}
 456	if (count == 0) {
 457		qdisc_qstats_drop(sch);
 458		__qdisc_drop(skb, to_free);
 459		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 460	}
 461
 462	/* If a delay is expected, orphan the skb. (orphaning usually takes
 463	 * place at TX completion time, so _before_ the link transit delay)
 464	 */
 465	if (q->latency || q->jitter)
 466		skb_orphan_partial(skb);
 467
 468	/*
 469	 * If we need to duplicate packet, then re-insert at top of the
 470	 * qdisc tree, since parent queuer expects that only one
 471	 * skb will be queued.
 472	 */
 473	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 474		struct Qdisc *rootq = qdisc_root(sch);
 475		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 476
 477		q->duplicate = 0;
 478		rootq->enqueue(skb2, rootq, to_free);
 479		q->duplicate = dupsave;
 480	}
 481
 482	/*
 483	 * Randomized packet corruption.
 484	 * Make copy if needed since we are modifying
 485	 * If packet is going to be hardware checksummed, then
 486	 * do it now in software before we mangle it.
 487	 */
 488	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 489		if (skb_is_gso(skb)) {
 490			segs = netem_segment(skb, sch, to_free);
 491			if (!segs)
 492				return NET_XMIT_DROP;
 493		} else {
 494			segs = skb;
 495		}
 496
 497		skb = segs;
 498		segs = segs->next;
 499
 500		skb = skb_unshare(skb, GFP_ATOMIC);
 501		if (unlikely(!skb)) {
 502			qdisc_qstats_drop(sch);
 503			goto finish_segs;
 504		}
 505		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 506		    skb_checksum_help(skb)) {
 507			qdisc_drop(skb, sch, to_free);
 508			goto finish_segs;
 509		}
 510
 511		skb->data[prandom_u32() % skb_headlen(skb)] ^=
 512			1<<(prandom_u32() % 8);
 513	}
 514
 515	if (unlikely(sch->q.qlen >= sch->limit))
 516		return qdisc_drop(skb, sch, to_free);
 517
 518	qdisc_qstats_backlog_inc(sch, skb);
 519
 520	cb = netem_skb_cb(skb);
 521	if (q->gap == 0 ||		/* not doing reordering */
 522	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 523	    q->reorder < get_crandom(&q->reorder_cor)) {
 524		psched_time_t now;
 525		psched_tdiff_t delay;
 526
 527		delay = tabledist(q->latency, q->jitter,
 528				  &q->delay_cor, q->delay_dist);
 529
 530		now = psched_get_time();
 531
 532		if (q->rate) {
 533			struct sk_buff *last;
 534
 535			if (sch->q.qlen)
 536				last = sch->q.tail;
 537			else
 538				last = netem_rb_to_skb(rb_last(&q->t_root));
 539			if (last) {
 540				/*
 541				 * Last packet in queue is reference point (now),
 542				 * calculate this time bonus and subtract
 543				 * from delay.
 544				 */
 545				delay -= netem_skb_cb(last)->time_to_send - now;
 546				delay = max_t(psched_tdiff_t, 0, delay);
 547				now = netem_skb_cb(last)->time_to_send;
 548			}
 549
 550			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
 551		}
 552
 553		cb->time_to_send = now + delay;
 554		cb->tstamp_save = skb->tstamp;
 555		++q->counter;
 556		tfifo_enqueue(skb, sch);
 557	} else {
 558		/*
 559		 * Do re-ordering by putting one out of N packets at the front
 560		 * of the queue.
 561		 */
 562		cb->time_to_send = psched_get_time();
 563		q->counter = 0;
 564
 565		netem_enqueue_skb_head(&sch->q, skb);
 566		sch->qstats.requeues++;
 567	}
 568
 569finish_segs:
 570	if (segs) {
 571		while (segs) {
 572			skb2 = segs->next;
 573			segs->next = NULL;
 574			qdisc_skb_cb(segs)->pkt_len = segs->len;
 575			last_len = segs->len;
 576			rc = qdisc_enqueue(segs, sch, to_free);
 577			if (rc != NET_XMIT_SUCCESS) {
 578				if (net_xmit_drop_count(rc))
 579					qdisc_qstats_drop(sch);
 580			} else {
 581				nb++;
 582				len += last_len;
 583			}
 584			segs = skb2;
 585		}
 586		sch->q.qlen += nb;
 587		if (nb > 1)
 588			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
 589	}
 590	return NET_XMIT_SUCCESS;
 591}
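/* Editor's note (not part of the kernel source): after segmentation the
 * parent has already accounted for a single packet of prev_len bytes, so
 * finish_segs reconciles its counters via qdisc_tree_reduce_backlog()
 * with the deltas 1 - nb packets and prev_len - len bytes, reflecting
 * what was actually queued.
 */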
 592
 593static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 594{
 595	struct netem_sched_data *q = qdisc_priv(sch);
 596	struct sk_buff *skb;
 597	struct rb_node *p;
 598
 599tfifo_dequeue:
 600	skb = __qdisc_dequeue_head(&sch->q);
 601	if (skb) {
 602		qdisc_qstats_backlog_dec(sch, skb);
 603deliver:
 604		qdisc_bstats_update(sch, skb);
 605		return skb;
 606	}
 607	p = rb_first(&q->t_root);
 608	if (p) {
 609		psched_time_t time_to_send;
 610
 611		skb = netem_rb_to_skb(p);
 612
 613		/* if more time remaining? */
 614		time_to_send = netem_skb_cb(skb)->time_to_send;
 615		if (time_to_send <= psched_get_time()) {
 616			rb_erase(p, &q->t_root);
 617
 618			sch->q.qlen--;
 619			qdisc_qstats_backlog_dec(sch, skb);
 620			skb->next = NULL;
 621			skb->prev = NULL;
 622			skb->tstamp = netem_skb_cb(skb)->tstamp_save;
 623
 624#ifdef CONFIG_NET_CLS_ACT
 625			/*
 626			 * If it's at ingress let's pretend the delay is
 627			 * from the network (tstamp will be updated).
 628			 */
 629			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
 630				skb->tstamp = 0;
 631#endif
 632
 633			if (q->qdisc) {
 634				unsigned int pkt_len = qdisc_pkt_len(skb);
 635				struct sk_buff *to_free = NULL;
 636				int err;
 637
 638				err = qdisc_enqueue(skb, q->qdisc, &to_free);
 639				kfree_skb_list(to_free);
 640				if (err != NET_XMIT_SUCCESS &&
 641				    net_xmit_drop_count(err)) {
 642					qdisc_qstats_drop(sch);
 643					qdisc_tree_reduce_backlog(sch, 1,
 644								  pkt_len);
 645				}
 646				goto tfifo_dequeue;
 647			}
 648			goto deliver;
 649		}
 650
 651		if (q->qdisc) {
 652			skb = q->qdisc->ops->dequeue(q->qdisc);
 653			if (skb)
 654				goto deliver;
 655		}
 656		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
 657	}
 658
 659	if (q->qdisc) {
 660		skb = q->qdisc->ops->dequeue(q->qdisc);
 661		if (skb)
 662			goto deliver;
 663	}
 664	return NULL;
 665}
 666
 667static void netem_reset(struct Qdisc *sch)
 668{
 669	struct netem_sched_data *q = qdisc_priv(sch);
 670
 671	qdisc_reset_queue(sch);
 672	tfifo_reset(sch);
 673	if (q->qdisc)
 674		qdisc_reset(q->qdisc);
 675	qdisc_watchdog_cancel(&q->watchdog);
 676}
 677
 678static void dist_free(struct disttable *d)
 679{
 680	kvfree(d);
 681}
 682
 683/*
 684 * Distribution data is a variable size payload containing
 685 * signed 16 bit values.
 686 */
 687static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 688{
 689	struct netem_sched_data *q = qdisc_priv(sch);
 690	size_t n = nla_len(attr)/sizeof(__s16);
 691	const __s16 *data = nla_data(attr);
 692	spinlock_t *root_lock;
 693	struct disttable *d;
 694	int i;
 695	size_t s;
 696
 697	if (n > NETEM_DIST_MAX)
 698		return -EINVAL;
 699
 700	s = sizeof(struct disttable) + n * sizeof(s16);
 701	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
 702	if (!d)
 703		d = vmalloc(s);
 704	if (!d)
 705		return -ENOMEM;
 706
 707	d->size = n;
 708	for (i = 0; i < n; i++)
 709		d->table[i] = data[i];
 710
 711	root_lock = qdisc_root_sleeping_lock(sch);
 712
 713	spin_lock_bh(root_lock);
 714	swap(q->delay_dist, d);
 715	spin_unlock_bh(root_lock);
 716
 717	dist_free(d);
 718	return 0;
 719}
 720
 721static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 722{
 723	const struct tc_netem_corr *c = nla_data(attr);
 724
 725	init_crandom(&q->delay_cor, c->delay_corr);
 726	init_crandom(&q->loss_cor, c->loss_corr);
 727	init_crandom(&q->dup_cor, c->dup_corr);
 728}
 729
 730static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
 731{
 732	const struct tc_netem_reorder *r = nla_data(attr);
 733
 734	q->reorder = r->probability;
 735	init_crandom(&q->reorder_cor, r->correlation);
 736}
 737
 738static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
 739{
 740	const struct tc_netem_corrupt *r = nla_data(attr);
 741
 742	q->corrupt = r->probability;
 743	init_crandom(&q->corrupt_cor, r->correlation);
 744}
 745
 746static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
 747{
 748	const struct tc_netem_rate *r = nla_data(attr);
 749
 750	q->rate = r->rate;
 751	q->packet_overhead = r->packet_overhead;
 752	q->cell_size = r->cell_size;
 753	q->cell_overhead = r->cell_overhead;
 754	if (q->cell_size)
 755		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 756	else
 757		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
 758}
 759
 760static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
 761{
 762	const struct nlattr *la;
 763	int rem;
 764
 765	nla_for_each_nested(la, attr, rem) {
 766		u16 type = nla_type(la);
 767
 768		switch (type) {
 769		case NETEM_LOSS_GI: {
 770			const struct tc_netem_gimodel *gi = nla_data(la);
 771
 772			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 773				pr_info("netem: incorrect gi model size\n");
 774				return -EINVAL;
 775			}
 776
 777			q->loss_model = CLG_4_STATES;
 778
 779			q->clg.state = TX_IN_GAP_PERIOD;
 780			q->clg.a1 = gi->p13;
 781			q->clg.a2 = gi->p31;
 782			q->clg.a3 = gi->p32;
 783			q->clg.a4 = gi->p14;
 784			q->clg.a5 = gi->p23;
 785			break;
 786		}
 787
 788		case NETEM_LOSS_GE: {
 789			const struct tc_netem_gemodel *ge = nla_data(la);
 790
 791			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 792				pr_info("netem: incorrect ge model size\n");
 793				return -EINVAL;
 794			}
 795
 796			q->loss_model = CLG_GILB_ELL;
 797			q->clg.state = GOOD_STATE;
 798			q->clg.a1 = ge->p;
 799			q->clg.a2 = ge->r;
 800			q->clg.a3 = ge->h;
 801			q->clg.a4 = ge->k1;
 802			break;
 803		}
 804
 805		default:
 806			pr_info("netem: unknown loss type %u\n", type);
 807			return -EINVAL;
 808		}
 809	}
 810
 811	return 0;
 812}
 813
 814static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 815	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 816	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 817	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 818	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 819	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 820	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 821	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 822};
 823
 824static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 825		      const struct nla_policy *policy, int len)
 826{
 827	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 828
 829	if (nested_len < 0) {
 830		pr_info("netem: invalid attributes len %d\n", nested_len);
 831		return -EINVAL;
 832	}
 833
 834	if (nested_len >= nla_attr_size(0))
 835		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
 836				 nested_len, policy);
 837
 838	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 839	return 0;
 840}
 841
 842/* Parse netlink message to set options */
 843static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 844{
 845	struct netem_sched_data *q = qdisc_priv(sch);
 846	struct nlattr *tb[TCA_NETEM_MAX + 1];
 847	struct tc_netem_qopt *qopt;
 848	struct clgstate old_clg;
 849	int old_loss_model = CLG_RANDOM;
 850	int ret;
 851
 852	if (opt == NULL)
 853		return -EINVAL;
 854
 855	qopt = nla_data(opt);
 856	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 857	if (ret < 0)
 858		return ret;
 859
 860	/* backup q->clg and q->loss_model */
 861	old_clg = q->clg;
 862	old_loss_model = q->loss_model;
 863
 864	if (tb[TCA_NETEM_LOSS]) {
 865		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
 866		if (ret) {
 867			q->loss_model = old_loss_model;
 868			return ret;
 869		}
 870	} else {
 871		q->loss_model = CLG_RANDOM;
 872	}
 873
 874	if (tb[TCA_NETEM_DELAY_DIST]) {
 875		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
 876		if (ret) {
 877			/* recover clg and loss_model, in case of
 878			 * q->clg and q->loss_model were modified
 879			 * in get_loss_clg()
 880			 */
 881			q->clg = old_clg;
 882			q->loss_model = old_loss_model;
 883			return ret;
 884		}
 885	}
 886
 887	sch->limit = qopt->limit;
 888
 889	q->latency = qopt->latency;
 890	q->jitter = qopt->jitter;
 891	q->limit = qopt->limit;
 892	q->gap = qopt->gap;
 893	q->counter = 0;
 894	q->loss = qopt->loss;
 895	q->duplicate = qopt->duplicate;
 896
 897	/* for compatibility with earlier versions.
 898	 * if gap is set, need to assume 100% probability
 899	 */
 900	if (q->gap)
 901		q->reorder = ~0;
 902
 903	if (tb[TCA_NETEM_CORR])
 904		get_correlation(q, tb[TCA_NETEM_CORR]);
 905
 906	if (tb[TCA_NETEM_REORDER])
 907		get_reorder(q, tb[TCA_NETEM_REORDER]);
 908
 909	if (tb[TCA_NETEM_CORRUPT])
 910		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
 911
 912	if (tb[TCA_NETEM_RATE])
 913		get_rate(q, tb[TCA_NETEM_RATE]);
 914
 915	if (tb[TCA_NETEM_RATE64])
 916		q->rate = max_t(u64, q->rate,
 917				nla_get_u64(tb[TCA_NETEM_RATE64]));
 918
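	/* Editor's note (not part of the kernel source): the legacy
	 * TCA_NETEM_RATE attribute carries a u32 in bytes/s, capping at
	 * ~4.3 GB/s (about 34 Gbit/s); TCA_NETEM_RATE64 lifts that limit,
	 * and netem_dump() below reports ~0U in the old field whenever the
	 * 64-bit rate is in use.
	 */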
 919	if (tb[TCA_NETEM_ECN])
 920		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 921
 922	return ret;
 923}
 924
 925static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 926{
 927	struct netem_sched_data *q = qdisc_priv(sch);
 928	int ret;
 929
 930	if (!opt)
 931		return -EINVAL;
 932
 933	qdisc_watchdog_init(&q->watchdog, sch);
 934
 935	q->loss_model = CLG_RANDOM;
 936	ret = netem_change(sch, opt);
 937	if (ret)
 938		pr_info("netem: change failed\n");
 939	return ret;
 940}
 941
 942static void netem_destroy(struct Qdisc *sch)
 943{
 944	struct netem_sched_data *q = qdisc_priv(sch);
 945
 946	qdisc_watchdog_cancel(&q->watchdog);
 947	if (q->qdisc)
 948		qdisc_destroy(q->qdisc);
 949	dist_free(q->delay_dist);
 950}
 951
 952static int dump_loss_model(const struct netem_sched_data *q,
 953			   struct sk_buff *skb)
 954{
 955	struct nlattr *nest;
 956
 957	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
 958	if (nest == NULL)
 959		goto nla_put_failure;
 960
 961	switch (q->loss_model) {
 962	case CLG_RANDOM:
 963		/* legacy loss model */
 964		nla_nest_cancel(skb, nest);
 965		return 0;	/* no data */
 966
 967	case CLG_4_STATES: {
 968		struct tc_netem_gimodel gi = {
 969			.p13 = q->clg.a1,
 970			.p31 = q->clg.a2,
 971			.p32 = q->clg.a3,
 972			.p14 = q->clg.a4,
 973			.p23 = q->clg.a5,
 974		};
 975
 976		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
 977			goto nla_put_failure;
 978		break;
 979	}
 980	case CLG_GILB_ELL: {
 981		struct tc_netem_gemodel ge = {
 982			.p = q->clg.a1,
 983			.r = q->clg.a2,
 984			.h = q->clg.a3,
 985			.k1 = q->clg.a4,
 986		};
 987
 988		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
 989			goto nla_put_failure;
 990		break;
 991	}
 992	}
 993
 994	nla_nest_end(skb, nest);
 995	return 0;
 996
 997nla_put_failure:
 998	nla_nest_cancel(skb, nest);
 999	return -1;
1000}
1001
1002static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
1003{
1004	const struct netem_sched_data *q = qdisc_priv(sch);
1005	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
1006	struct tc_netem_qopt qopt;
1007	struct tc_netem_corr cor;
1008	struct tc_netem_reorder reorder;
1009	struct tc_netem_corrupt corrupt;
1010	struct tc_netem_rate rate;
1011
1012	qopt.latency = q->latency;
1013	qopt.jitter = q->jitter;
1014	qopt.limit = q->limit;
1015	qopt.loss = q->loss;
1016	qopt.gap = q->gap;
1017	qopt.duplicate = q->duplicate;
1018	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1019		goto nla_put_failure;
1020
1021	cor.delay_corr = q->delay_cor.rho;
1022	cor.loss_corr = q->loss_cor.rho;
1023	cor.dup_corr = q->dup_cor.rho;
1024	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1025		goto nla_put_failure;
1026
1027	reorder.probability = q->reorder;
1028	reorder.correlation = q->reorder_cor.rho;
1029	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1030		goto nla_put_failure;
1031
1032	corrupt.probability = q->corrupt;
1033	corrupt.correlation = q->corrupt_cor.rho;
1034	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1035		goto nla_put_failure;
1036
1037	if (q->rate >= (1ULL << 32)) {
1038		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
1039				      TCA_NETEM_PAD))
1040			goto nla_put_failure;
1041		rate.rate = ~0U;
1042	} else {
1043		rate.rate = q->rate;
1044	}
1045	rate.packet_overhead = q->packet_overhead;
1046	rate.cell_size = q->cell_size;
1047	rate.cell_overhead = q->cell_overhead;
1048	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1049		goto nla_put_failure;
1050
1051	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1052		goto nla_put_failure;
1053
1054	if (dump_loss_model(q, skb) != 0)
1055		goto nla_put_failure;
1056
1057	return nla_nest_end(skb, nla);
1058
1059nla_put_failure:
1060	nlmsg_trim(skb, nla);
1061	return -1;
1062}
1063
1064static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
1065			  struct sk_buff *skb, struct tcmsg *tcm)
1066{
1067	struct netem_sched_data *q = qdisc_priv(sch);
1068
1069	if (cl != 1 || !q->qdisc) 	/* only one class */
1070		return -ENOENT;
1071
1072	tcm->tcm_handle |= TC_H_MIN(1);
1073	tcm->tcm_info = q->qdisc->handle;
1074
1075	return 0;
1076}
1077
1078static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1079		     struct Qdisc **old)
1080{
1081	struct netem_sched_data *q = qdisc_priv(sch);
1082
1083	*old = qdisc_replace(sch, new, &q->qdisc);
1084	return 0;
1085}
1086
1087static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
1088{
1089	struct netem_sched_data *q = qdisc_priv(sch);
1090	return q->qdisc;
1091}
1092
1093static unsigned long netem_get(struct Qdisc *sch, u32 classid)
1094{
1095	return 1;
1096}
1097
1098static void netem_put(struct Qdisc *sch, unsigned long arg)
1099{
1100}
1101
1102static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
1103{
1104	if (!walker->stop) {
1105		if (walker->count >= walker->skip)
1106			if (walker->fn(sch, 1, walker) < 0) {
1107				walker->stop = 1;
1108				return;
1109			}
1110		walker->count++;
1111	}
1112}
1113
1114static const struct Qdisc_class_ops netem_class_ops = {
1115	.graft		=	netem_graft,
1116	.leaf		=	netem_leaf,
1117	.get		=	netem_get,
1118	.put		=	netem_put,
1119	.walk		=	netem_walk,
1120	.dump		=	netem_dump_class,
1121};
1122
1123static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1124	.id		=	"netem",
1125	.cl_ops		=	&netem_class_ops,
1126	.priv_size	=	sizeof(struct netem_sched_data),
1127	.enqueue	=	netem_enqueue,
1128	.dequeue	=	netem_dequeue,
1129	.peek		=	qdisc_peek_dequeued,
1130	.init		=	netem_init,
1131	.reset		=	netem_reset,
1132	.destroy	=	netem_destroy,
1133	.change		=	netem_change,
1134	.dump		=	netem_dump,
1135	.owner		=	THIS_MODULE,
1136};
1137
1138
1139static int __init netem_module_init(void)
1140{
1141	pr_info("netem: version " VERSION "\n");
1142	return register_qdisc(&netem_qdisc_ops);
1143}
1144static void __exit netem_module_exit(void)
1145{
1146	unregister_qdisc(&netem_qdisc_ops);
1147}
1148module_init(netem_module_init)
1149module_exit(netem_module_exit)
1150MODULE_LICENSE("GPL");