v4.17
   1/*
   2 * net/sched/sch_netem.c	Network emulator
   3 *
   4 * 		This program is free software; you can redistribute it and/or
   5 * 		modify it under the terms of the GNU General Public License
   6 * 		as published by the Free Software Foundation; either version
   7 * 		2 of the License.
   8 *
   9 *  		Many of the algorithms and ideas for this came from
  10 *		NIST Net which is not copyrighted.
  11 *
  12 * Authors:	Stephen Hemminger <shemminger@osdl.org>
  13 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/module.h>
  18#include <linux/slab.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/rtnetlink.h>
  25#include <linux/reciprocal_div.h>
  26#include <linux/rbtree.h>
  27
  28#include <net/netlink.h>
  29#include <net/pkt_sched.h>
  30#include <net/inet_ecn.h>
  31
  32#define VERSION "1.3"
  33
  34/*	Network Emulation Queuing algorithm.
  35	====================================
  36
  37	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
  38		 Network Emulation Tool"
  39		 [2] Luigi Rizzo, DummyNet for FreeBSD
  40
  41	 ----------------------------------------------------------------
  42
  43	 This started out as a simple way to delay outgoing packets to
  44	 test TCP but has grown to include most of the functionality
  45	 of a full blown network emulator like NISTnet. It can delay
  46	 packets and add random jitter (and correlation). The random
  47	 distribution can be loaded from a table as well to provide
  48	 normal, Pareto, or experimental curves. Packet loss,
  49	 duplication, and reordering can also be emulated.
  50
  51	 This qdisc does not do classification; that can be handled by
  52	 layering other disciplines.  It does not need to do bandwidth
  53	 control either since that can be handled by using token
  54	 bucket or other rate control.
  55
  56     Correlated Loss Generator models
  57
  58	Added generation of correlated loss according to the
  59	"Gilbert-Elliot" model and to a 4-state Markov model.
  60
  61	References:
  62	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
  63	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
  64	and intuitive loss model for packet networks and its implementation
  65	in the Netem module in the Linux kernel", available in [1]
  66
  67	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
  68		 Fabio Ludovici <fabio.ludovici at yahoo.it>
  69*/
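/* Illustration (not part of the original source): typical iproute2
 * commands driving this qdisc, assuming an interface named eth0:
 *
 *   tc qdisc add dev eth0 root netem delay 100ms 20ms 25%
 *       100ms mean delay, 20ms jitter, 25% correlation
 *   tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *       random loss plus duplication
 */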
  70
  71struct netem_sched_data {
  72	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
  73	struct rb_root t_root;
  74
  75	/* optional qdisc for classful handling (NULL at netem init) */
  76	struct Qdisc	*qdisc;
  77
  78	struct qdisc_watchdog watchdog;
  79
  80	s64 latency;
  81	s64 jitter;
  82
  83	u32 loss;
  84	u32 ecn;
  85	u32 limit;
  86	u32 counter;
  87	u32 gap;
  88	u32 duplicate;
  89	u32 reorder;
  90	u32 corrupt;
  91	u64 rate;
  92	s32 packet_overhead;
  93	u32 cell_size;
  94	struct reciprocal_value cell_size_reciprocal;
  95	s32 cell_overhead;
  96
  97	struct crndstate {
  98		u32 last;
  99		u32 rho;
 100	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 101
 102	struct disttable {
 103		u32  size;
 104		s16 table[0];
 105	} *delay_dist;
 106
 107	enum  {
 108		CLG_RANDOM,
 109		CLG_4_STATES,
 110		CLG_GILB_ELL,
 111	} loss_model;
 112
 113	enum {
 114		TX_IN_GAP_PERIOD = 1,
 115		TX_IN_BURST_PERIOD,
 116		LOST_IN_GAP_PERIOD,
 117		LOST_IN_BURST_PERIOD,
 118	} _4_state_model;
 119
 120	enum {
 121		GOOD_STATE = 1,
 122		BAD_STATE,
 123	} GE_state_model;
 124
 125	/* Correlated Loss Generation models */
 126	struct clgstate {
 127		/* state of the Markov chain */
 128		u8 state;
 129
 130		/* 4-states and Gilbert-Elliot models */
 131		u32 a1;	/* p13 for 4-states or p for GE */
 132		u32 a2;	/* p31 for 4-states or r for GE */
 133		u32 a3;	/* p32 for 4-states or h for GE */
 134		u32 a4;	/* p14 for 4-states or 1-k for GE */
 135		u32 a5; /* p23 used only in 4-states */
 136	} clg;
 137
 138	struct tc_netem_slot slot_config;
 139	struct slotstate {
 140		u64 slot_next;
 141		s32 packets_left;
 142		s32 bytes_left;
 143	} slot;
 144
 145};
 146
 147/* Time stamp put into socket buffer control block
 148 * Only valid when skbs are in our internal t(ime)fifo queue.
 149 *
 150 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 151 * and skb->next & skb->prev are scratch space for a qdisc,
 152 * we save skb->tstamp value in skb->cb[] before destroying it.
 153 */
 154struct netem_skb_cb {
 155	u64	        time_to_send;
 156};
 157
 158static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 159{
 160	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
 161	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 162	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 163}
 164
 165/* init_crandom - initialize correlated random number generator
 166 * Use entropy source for initial seed.
 167 */
 168static void init_crandom(struct crndstate *state, unsigned long rho)
 169{
 170	state->rho = rho;
 171	state->last = prandom_u32();
 172}
 173
 174/* get_crandom - correlated random number generator
 175 * Next number depends on last value.
 176 * rho is scaled to avoid floating point.
 177 */
 178static u32 get_crandom(struct crndstate *state)
 179{
 180	u64 value, rho;
 181	unsigned long answer;
 182
 183	if (state->rho == 0)	/* no correlation */
 184		return prandom_u32();
 185
 186	value = prandom_u32();
 187	rho = (u64)state->rho + 1;
 188	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 189	state->last = answer;
 190	return answer;
 191}
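/* Worked example: get_crandom() computes, in 32.32 fixed point,
 *
 *   answer = (1 - p) * U + p * last,  where p = (rho + 1) / 2^32
 *
 * and U is uniform on [0, 2^32).  With rho = 0x80000000 (p ~ 0.5) each
 * output is roughly an even blend of a fresh uniform draw and the
 * previous output, giving the configured first-order correlation.
 */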
 192
 193/* loss_4state - 4-state model loss generator
 194 * Generates losses according to the 4-state Markov chain adopted in
 195 * the GI (General and Intuitive) loss model.
 196 */
 197static bool loss_4state(struct netem_sched_data *q)
 198{
 199	struct clgstate *clg = &q->clg;
 200	u32 rnd = prandom_u32();
 201
 202	/*
 203	 * Makes a comparison between rnd and the transition
 204	 * probabilities outgoing from the current state, then decides the
 205	 * next state and if the next packet has to be transmitted or lost.
 206	 * The four states correspond to:
 207	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
 208	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
 209	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
 210	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
 211	 */
 212	switch (clg->state) {
 213	case TX_IN_GAP_PERIOD:
 214		if (rnd < clg->a4) {
 215			clg->state = LOST_IN_BURST_PERIOD;
 216			return true;
 217		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 218			clg->state = LOST_IN_GAP_PERIOD;
 219			return true;
 220		} else if (clg->a1 + clg->a4 < rnd) {
 221			clg->state = TX_IN_GAP_PERIOD;
 222		}
 223
 224		break;
 225	case TX_IN_BURST_PERIOD:
 226		if (rnd < clg->a5) {
 227			clg->state = LOST_IN_GAP_PERIOD;
 228			return true;
 229		} else {
 230			clg->state = TX_IN_BURST_PERIOD;
 231		}
 232
 233		break;
 234	case LOST_IN_GAP_PERIOD:
 235		if (rnd < clg->a3)
 236			clg->state = TX_IN_BURST_PERIOD;
 237		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
 238			clg->state = TX_IN_GAP_PERIOD;
 239		} else if (clg->a2 + clg->a3 < rnd) {
 240			clg->state = LOST_IN_GAP_PERIOD;
 241			return true;
 242		}
 243		break;
 244	case LOST_IN_BURST_PERIOD:
 245		clg->state = TX_IN_GAP_PERIOD;
 246		break;
 247	}
 248
 249	return false;
 250}
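/* In TX_IN_GAP_PERIOD, for instance, the u32 range is partitioned as
 *
 *   [0, a4)          -> isolated loss   (LOST_IN_BURST_PERIOD)
 *   [a4, a1 + a4)    -> burst begins    (LOST_IN_GAP_PERIOD)
 *   [a1 + a4, 2^32)  -> stay in gap, packet transmitted
 *
 * i.e. a1..a5 are transition probabilities scaled to the full u32 range.
 */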
 251
 252/* loss_gilb_ell - Gilbert-Elliot model loss generator
 253 * Generates losses according to the Gilbert-Elliot loss model or
 254 * its special cases  (Gilbert or Simple Gilbert)
 255 *
 256 * Makes a comparison between random number and the transition
 257 * probabilities outgoing from the current state, then decides the
 258 * next state. A second random number is extracted and the comparison
 259 * with the loss probability of the current state decides if the next
 260 * packet will be transmitted or lost.
 261 */
 262static bool loss_gilb_ell(struct netem_sched_data *q)
 263{
 264	struct clgstate *clg = &q->clg;
 265
 266	switch (clg->state) {
 267	case GOOD_STATE:
 268		if (prandom_u32() < clg->a1)
 269			clg->state = BAD_STATE;
 270		if (prandom_u32() < clg->a4)
 271			return true;
 272		break;
 273	case BAD_STATE:
 274		if (prandom_u32() < clg->a2)
 275			clg->state = GOOD_STATE;
 276		if (prandom_u32() > clg->a3)
 277			return true;
 278	}
 279
 280	return false;
 281}
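/* Parameter sketch: a1 = p (good->bad), a2 = r (bad->good),
 * a3 = h (transmission probability while bad) and a4 = 1-k (loss
 * probability while good).  Leaving the bad state is a geometric trial
 * with probability r, so e.g. r = 20% gives mean loss bursts of
 * 1/r = 5 packets.
 */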
 282
 283static bool loss_event(struct netem_sched_data *q)
 284{
 285	switch (q->loss_model) {
 286	case CLG_RANDOM:
 287		/* Random packet drop 0 => none, ~0 => all */
 288		return q->loss && q->loss >= get_crandom(&q->loss_cor);
 289
 290	case CLG_4_STATES:
 291		/* 4state loss model algorithm (used also for GI model)
 292		* Extracts a value from the markov 4 state loss generator,
 293		* if it is 1 drops a packet and if needed writes the event in
 294		* the kernel logs
 295		*/
 296		return loss_4state(q);
 297
 298	case CLG_GILB_ELL:
 299		/* Gilbert-Elliot loss model algorithm
 300		* Extracts a value from the Gilbert-Elliot loss generator,
 301		* if it is 1 drops a packet and if needed writes the event in
 302		* the kernel logs
 303		*/
 304		return loss_gilb_ell(q);
 305	}
 306
 307	return false;	/* not reached */
 308}
 309
 310
 311/* tabledist - return a pseudo-randomly distributed value with mean mu and
 312 * std deviation sigma.  Uses table lookup to approximate the desired
 313 * distribution, and a uniformly-distributed pseudo-random source.
 314 */
 315static s64 tabledist(s64 mu, s32 sigma,
 316		     struct crndstate *state,
 317		     const struct disttable *dist)
 318{
 319	s64 x;
 320	long t;
 321	u32 rnd;
 322
 323	if (sigma == 0)
 324		return mu;
 325
 326	rnd = get_crandom(state);
 327
 328	/* default uniform distribution */
 329	if (dist == NULL)
 330		return ((rnd % (2 * sigma)) + mu) - sigma;
 331
 332	t = dist->table[rnd % dist->size];
 333	x = (sigma % NETEM_DIST_SCALE) * t;
 334	if (x >= 0)
 335		x += NETEM_DIST_SCALE/2;
 336	else
 337		x -= NETEM_DIST_SCALE/2;
 338
 339	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 340}
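/* Worked example: table entries are in units of sigma/NETEM_DIST_SCALE
 * (8192 in pkt_sched.h), so the result is approximately
 *
 *   mu + t * sigma / NETEM_DIST_SCALE
 *
 * split into quotient and rounded remainder to avoid overflow; e.g.
 * mu = 100ms, sigma = 10ms and t = -4096 yields about 95ms.
 */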
 341
 342static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
 343{
 344	len += q->packet_overhead;
 345
 346	if (q->cell_size) {
 347		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
 348
 349		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
 350			cells++;
 351		len = cells * (q->cell_size + q->cell_overhead);
 352	}
 353
 354	return div64_u64(len * NSEC_PER_SEC, q->rate);
 355}
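/* Worked example: q->rate is in bytes per second, so a 1500-byte
 * packet at rate = 125000 (1 Mbit/s) costs 12 ms of transmit time.
 * With cell_size = 48 (ATM-like framing) the length is first rounded
 * up to whole cells, each widened by cell_overhead.
 */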
 356
 357static void tfifo_reset(struct Qdisc *sch)
 358{
 359	struct netem_sched_data *q = qdisc_priv(sch);
 360	struct rb_node *p = rb_first(&q->t_root);
 361
 362	while (p) {
 363		struct sk_buff *skb = rb_to_skb(p);
 364
 365		p = rb_next(p);
 366		rb_erase(&skb->rbnode, &q->t_root);
 367		rtnl_kfree_skbs(skb, skb);
 368	}
 369}
 370
 371static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 372{
 373	struct netem_sched_data *q = qdisc_priv(sch);
 374	u64 tnext = netem_skb_cb(nskb)->time_to_send;
 375	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 376
 377	while (*p) {
 378		struct sk_buff *skb;
 379
 380		parent = *p;
 381		skb = rb_to_skb(parent);
 382		if (tnext >= netem_skb_cb(skb)->time_to_send)
 383			p = &parent->rb_right;
 384		else
 385			p = &parent->rb_left;
 386	}
 387	rb_link_node(&nskb->rbnode, parent, p);
 388	rb_insert_color(&nskb->rbnode, &q->t_root);
 389	sch->q.qlen++;
 390}
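/* The tfifo is an rbtree keyed on time_to_send; ties walk right, so
 * packets with equal timestamps keep FIFO order, and insertion is
 * O(log n) instead of the O(n) list walk used by older kernels.
 */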
 391
 392/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 393 * when we statistically choose to corrupt one, we instead segment it, returning
 394 * the first packet to be corrupted, and re-enqueue the remaining frames
 395 */
 396static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
 397				     struct sk_buff **to_free)
 398{
 399	struct sk_buff *segs;
 400	netdev_features_t features = netif_skb_features(skb);
 401
 402	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 403
 404	if (IS_ERR_OR_NULL(segs)) {
 405		qdisc_drop(skb, sch, to_free);
 406		return NULL;
 407	}
 408	consume_skb(skb);
 409	return segs;
 410}
 411
 412static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
 413{
 414	skb->next = qh->head;
 415
 416	if (!qh->head)
 417		qh->tail = skb;
 418	qh->head = skb;
 419	qh->qlen++;
 420}
 421
 422/*
 423 * Insert one skb into qdisc.
 424 * Note: parent depends on return value to account for queue length.
 425 * 	NET_XMIT_DROP: queue length didn't change.
 426 *      NET_XMIT_SUCCESS: one skb was queued.
 427 */
 428static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 429			 struct sk_buff **to_free)
 430{
 431	struct netem_sched_data *q = qdisc_priv(sch);
 432	/* We don't fill cb now as skb_unshare() may invalidate it */
 433	struct netem_skb_cb *cb;
 434	struct sk_buff *skb2;
 435	struct sk_buff *segs = NULL;
 436	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
 437	int nb = 0;
 438	int count = 1;
 439	int rc = NET_XMIT_SUCCESS;
 440
 441	/* Random duplication */
 442	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 443		++count;
 444
 445	/* Drop packet? */
 446	if (loss_event(q)) {
 447		if (q->ecn && INET_ECN_set_ce(skb))
 448			qdisc_qstats_drop(sch); /* mark packet */
 449		else
 450			--count;
 451	}
 452	if (count == 0) {
 453		qdisc_qstats_drop(sch);
 454		__qdisc_drop(skb, to_free);
 455		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 456	}
 457
 458	/* If a delay is expected, orphan the skb. (orphaning usually takes
 459	 * place at TX completion time, so _before_ the link transit delay)
 460	 */
 461	if (q->latency || q->jitter || q->rate)
 462		skb_orphan_partial(skb);
 463
 464	/*
 465	 * If we need to duplicate packet, then re-insert at top of the
 466	 * qdisc tree, since parent queuer expects that only one
 467	 * skb will be queued.
 468	 */
 469	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 470		struct Qdisc *rootq = qdisc_root(sch);
 471		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 472
 473		q->duplicate = 0;
 474		rootq->enqueue(skb2, rootq, to_free);
 475		q->duplicate = dupsave;
 476	}
 477
 478	/*
 479	 * Randomized packet corruption.
 480	 * Make a copy if needed since we are modifying the data.
 481	 * If the packet is going to be hardware checksummed, then
 482	 * do it now in software before we mangle it.
 483	 */
 484	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 485		if (skb_is_gso(skb)) {
 486			segs = netem_segment(skb, sch, to_free);
 487			if (!segs)
 488				return NET_XMIT_DROP;
 489		} else {
 490			segs = skb;
 491		}
 492
 493		skb = segs;
 494		segs = segs->next;
 495
 496		skb = skb_unshare(skb, GFP_ATOMIC);
 497		if (unlikely(!skb)) {
 498			qdisc_qstats_drop(sch);
 499			goto finish_segs;
 500		}
 501		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 502		    skb_checksum_help(skb)) {
 503			qdisc_drop(skb, sch, to_free);
 504			goto finish_segs;
 505		}
 506
 507		skb->data[prandom_u32() % skb_headlen(skb)] ^=
 508			1<<(prandom_u32() % 8);
 509	}
 510
 511	if (unlikely(sch->q.qlen >= sch->limit))
 512		return qdisc_drop_all(skb, sch, to_free);
 513
 514	qdisc_qstats_backlog_inc(sch, skb);
 515
 516	cb = netem_skb_cb(skb);
 517	if (q->gap == 0 ||		/* not doing reordering */
 518	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 519	    q->reorder < get_crandom(&q->reorder_cor)) {
 520		u64 now;
 521		s64 delay;
 522
 523		delay = tabledist(q->latency, q->jitter,
 524				  &q->delay_cor, q->delay_dist);
 525
 526		now = ktime_get_ns();
 527
 528		if (q->rate) {
 529			struct netem_skb_cb *last = NULL;
 530
 531			if (sch->q.tail)
 532				last = netem_skb_cb(sch->q.tail);
 533			if (q->t_root.rb_node) {
 534				struct sk_buff *t_skb;
 535				struct netem_skb_cb *t_last;
 536
 537				t_skb = skb_rb_last(&q->t_root);
 538				t_last = netem_skb_cb(t_skb);
 539				if (!last ||
 540				    t_last->time_to_send > last->time_to_send) {
 541					last = t_last;
 542				}
 543			}
 544
 545			if (last) {
 546				/*
 547				 * Last packet in queue is reference point (now),
 548				 * calculate this time bonus and subtract
 549				 * from delay.
 550				 */
 551				delay -= last->time_to_send - now;
 552				delay = max_t(s64, 0, delay);
 553				now = last->time_to_send;
 554			}
 555
 556			delay += packet_time_ns(qdisc_pkt_len(skb), q);
 557		}
 558
 559		cb->time_to_send = now + delay;
 560		++q->counter;
 561		tfifo_enqueue(skb, sch);
 562	} else {
 563		/*
 564		 * Do re-ordering by putting one out of N packets at the front
 565		 * of the queue.
 566		 */
 567		cb->time_to_send = ktime_get_ns();
 568		q->counter = 0;
 569
 570		netem_enqueue_skb_head(&sch->q, skb);
 571		sch->qstats.requeues++;
 572	}
 573
 574finish_segs:
 575	if (segs) {
 576		while (segs) {
 577			skb2 = segs->next;
 578			segs->next = NULL;
 579			qdisc_skb_cb(segs)->pkt_len = segs->len;
 580			last_len = segs->len;
 581			rc = qdisc_enqueue(segs, sch, to_free);
 582			if (rc != NET_XMIT_SUCCESS) {
 583				if (net_xmit_drop_count(rc))
 584					qdisc_qstats_drop(sch);
 585			} else {
 586				nb++;
 587				len += last_len;
 588			}
 589			segs = skb2;
 590		}
 591		sch->q.qlen += nb;
 592		if (nb > 1)
 593			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
 594	}
 595	return NET_XMIT_SUCCESS;
 596}
 597
 598/* Begin a new slot at some future time, with a fresh
 599 * budget of bytes and packets.
 600 */
 601
 602static void get_slot_next(struct netem_sched_data *q, u64 now)
 603{
 604	q->slot.slot_next = now + q->slot_config.min_delay +
 605		(prandom_u32() *
 606			(q->slot_config.max_delay -
 607				q->slot_config.min_delay) >> 32);
 608	q->slot.packets_left = q->slot_config.max_packets;
 609	q->slot.bytes_left = q->slot_config.max_bytes;
 610}
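/* The next slot start is drawn uniformly from
 * [now + min_delay, now + max_delay) via the 32.32 fixed-point product
 * prandom_u32() * (max_delay - min_delay) >> 32; e.g. min_delay = 1ms
 * and max_delay = 5ms emulates a link delivering bursts every 1-5ms.
 */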
 611
 612static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 613{
 614	struct netem_sched_data *q = qdisc_priv(sch);
 615	struct sk_buff *skb;
 616	struct rb_node *p;
 617
 618tfifo_dequeue:
 619	skb = __qdisc_dequeue_head(&sch->q);
 620	if (skb) {
 621		qdisc_qstats_backlog_dec(sch, skb);
 622deliver:
 623		qdisc_bstats_update(sch, skb);
 624		return skb;
 625	}
 626	p = rb_first(&q->t_root);
 627	if (p) {
 628		u64 time_to_send;
 629		u64 now = ktime_get_ns();
 630
 631		skb = rb_to_skb(p);
 632
 633		/* is there more time remaining? */
 634		time_to_send = netem_skb_cb(skb)->time_to_send;
 635		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
 636			get_slot_next(q, now);
 637
 638		if (time_to_send <= now &&  q->slot.slot_next <= now) {
 639			rb_erase(p, &q->t_root);
 640			sch->q.qlen--;
 641			qdisc_qstats_backlog_dec(sch, skb);
 642			skb->next = NULL;
 643			skb->prev = NULL;
 644			/* skb->dev shares skb->rbnode area,
 645			 * we need to restore its value.
 646			 */
 647			skb->dev = qdisc_dev(sch);
 648
 649#ifdef CONFIG_NET_CLS_ACT
 650			/*
 651			 * If it's at ingress let's pretend the delay is
 652			 * from the network (tstamp will be updated).
 653			 */
 654			if (skb->tc_redirected && skb->tc_from_ingress)
 655				skb->tstamp = 0;
 656#endif
 657
 658			if (q->slot.slot_next) {
 659				q->slot.packets_left--;
 660				q->slot.bytes_left -= qdisc_pkt_len(skb);
 661				if (q->slot.packets_left <= 0 ||
 662				    q->slot.bytes_left <= 0)
 663					get_slot_next(q, now);
 664			}
 665
 666			if (q->qdisc) {
 667				unsigned int pkt_len = qdisc_pkt_len(skb);
 668				struct sk_buff *to_free = NULL;
 669				int err;
 670
 671				err = qdisc_enqueue(skb, q->qdisc, &to_free);
 672				kfree_skb_list(to_free);
 673				if (err != NET_XMIT_SUCCESS &&
 674				    net_xmit_drop_count(err)) {
 675					qdisc_qstats_drop(sch);
 676					qdisc_tree_reduce_backlog(sch, 1,
 677								  pkt_len);
 678				}
 679				goto tfifo_dequeue;
 680			}
 681			goto deliver;
 682		}
 683
 684		if (q->qdisc) {
 685			skb = q->qdisc->ops->dequeue(q->qdisc);
 686			if (skb)
 687				goto deliver;
 688		}
 689
 690		qdisc_watchdog_schedule_ns(&q->watchdog,
 691					   max(time_to_send,
 692					       q->slot.slot_next));
 693	}
 694
 695	if (q->qdisc) {
 696		skb = q->qdisc->ops->dequeue(q->qdisc);
 697		if (skb)
 698			goto deliver;
 699	}
 700	return NULL;
 701}
 702
 703static void netem_reset(struct Qdisc *sch)
 704{
 705	struct netem_sched_data *q = qdisc_priv(sch);
 706
 707	qdisc_reset_queue(sch);
 708	tfifo_reset(sch);
 709	if (q->qdisc)
 710		qdisc_reset(q->qdisc);
 711	qdisc_watchdog_cancel(&q->watchdog);
 712}
 713
 714static void dist_free(struct disttable *d)
 715{
 716	kvfree(d);
 717}
 718
 719/*
 720 * Distribution data is a variable size payload containing
 721 * signed 16 bit values.
 722 */
 723
 724static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 725{
 726	struct netem_sched_data *q = qdisc_priv(sch);
 727	size_t n = nla_len(attr)/sizeof(__s16);
 728	const __s16 *data = nla_data(attr);
 729	spinlock_t *root_lock;
 730	struct disttable *d;
 731	int i;
 732
 733	if (n > NETEM_DIST_MAX)
 734		return -EINVAL;
 735
 736	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
 737	if (!d)
 738		return -ENOMEM;
 739
 740	d->size = n;
 741	for (i = 0; i < n; i++)
 742		d->table[i] = data[i];
 743
 744	root_lock = qdisc_root_sleeping_lock(sch);
 745
 746	spin_lock_bh(root_lock);
 747	swap(q->delay_dist, d);
 748	spin_unlock_bh(root_lock);
 749
 750	dist_free(d);
 751	return 0;
 752}
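/* The table itself comes from userspace; iproute2 ships pre-generated
 * distributions (normal, pareto, paretonormal, experimental).  The
 * swap happens under the qdisc root lock so a concurrent dequeue never
 * observes a half-initialized table.
 */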
 753
 754static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
 755{
 756	const struct tc_netem_slot *c = nla_data(attr);
 757
 758	q->slot_config = *c;
 759	if (q->slot_config.max_packets == 0)
 760		q->slot_config.max_packets = INT_MAX;
 761	if (q->slot_config.max_bytes == 0)
 762		q->slot_config.max_bytes = INT_MAX;
 763	q->slot.packets_left = q->slot_config.max_packets;
 764	q->slot.bytes_left = q->slot_config.max_bytes;
 765	if (q->slot_config.min_delay | q->slot_config.max_delay)
 766		q->slot.slot_next = ktime_get_ns();
 767	else
 768		q->slot.slot_next = 0;
 769}
 770
 771static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 772{
 773	const struct tc_netem_corr *c = nla_data(attr);
 774
 775	init_crandom(&q->delay_cor, c->delay_corr);
 776	init_crandom(&q->loss_cor, c->loss_corr);
 777	init_crandom(&q->dup_cor, c->dup_corr);
 778}
 779
 780static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
 781{
 782	const struct tc_netem_reorder *r = nla_data(attr);
 783
 784	q->reorder = r->probability;
 785	init_crandom(&q->reorder_cor, r->correlation);
 786}
 787
 788static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
 789{
 790	const struct tc_netem_corrupt *r = nla_data(attr);
 791
 792	q->corrupt = r->probability;
 793	init_crandom(&q->corrupt_cor, r->correlation);
 794}
 795
 796static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
 797{
 798	const struct tc_netem_rate *r = nla_data(attr);
 799
 800	q->rate = r->rate;
 801	q->packet_overhead = r->packet_overhead;
 802	q->cell_size = r->cell_size;
 803	q->cell_overhead = r->cell_overhead;
 804	if (q->cell_size)
 805		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
 806	else
 807		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
 808}
 809
 810static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
 811{
 812	const struct nlattr *la;
 813	int rem;
 814
 815	nla_for_each_nested(la, attr, rem) {
 816		u16 type = nla_type(la);
 817
 818		switch (type) {
 819		case NETEM_LOSS_GI: {
 820			const struct tc_netem_gimodel *gi = nla_data(la);
 821
 822			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 823				pr_info("netem: incorrect gi model size\n");
 824				return -EINVAL;
 825			}
 826
 827			q->loss_model = CLG_4_STATES;
 828
 829			q->clg.state = TX_IN_GAP_PERIOD;
 830			q->clg.a1 = gi->p13;
 831			q->clg.a2 = gi->p31;
 832			q->clg.a3 = gi->p32;
 833			q->clg.a4 = gi->p14;
 834			q->clg.a5 = gi->p23;
 835			break;
 836		}
 837
 838		case NETEM_LOSS_GE: {
 839			const struct tc_netem_gemodel *ge = nla_data(la);
 840
 841			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
 842				pr_info("netem: incorrect ge model size\n");
 843				return -EINVAL;
 844			}
 845
 846			q->loss_model = CLG_GILB_ELL;
 847			q->clg.state = GOOD_STATE;
 848			q->clg.a1 = ge->p;
 849			q->clg.a2 = ge->r;
 850			q->clg.a3 = ge->h;
 851			q->clg.a4 = ge->k1;
 852			break;
 853		}
 854
 855		default:
 856			pr_info("netem: unknown loss type %u\n", type);
 857			return -EINVAL;
 858		}
 859	}
 860
 861	return 0;
 862}
 863
 864static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 865	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 866	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 867	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 868	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 869	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 870	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 871	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 872	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
 873	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
 874	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
 875};
 876
 877static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 878		      const struct nla_policy *policy, int len)
 879{
 880	int nested_len = nla_len(nla) - NLA_ALIGN(len);
 881
 882	if (nested_len < 0) {
 883		pr_info("netem: invalid attributes len %d\n", nested_len);
 884		return -EINVAL;
 885	}
 886
 887	if (nested_len >= nla_attr_size(0))
 888		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
 889				 nested_len, policy, NULL);
 890
 891	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 892	return 0;
 893}
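/* Layout note: netem options begin with a fixed struct tc_netem_qopt
 * for compatibility with old userspace; nested attributes (correlation,
 * loss models, rate, slotting) follow it, hence the parse starts
 * NLA_ALIGN(sizeof(*qopt)) bytes into the payload.
 */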
 894
 895/* Parse netlink message to set options */
 896static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 897			struct netlink_ext_ack *extack)
 898{
 899	struct netem_sched_data *q = qdisc_priv(sch);
 900	struct nlattr *tb[TCA_NETEM_MAX + 1];
 901	struct tc_netem_qopt *qopt;
 902	struct clgstate old_clg;
 903	int old_loss_model = CLG_RANDOM;
 904	int ret;
 905
 906	if (opt == NULL)
 907		return -EINVAL;
 908
 909	qopt = nla_data(opt);
 910	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
 911	if (ret < 0)
 912		return ret;
 913
 914	/* backup q->clg and q->loss_model */
 915	old_clg = q->clg;
 916	old_loss_model = q->loss_model;
 917
 918	if (tb[TCA_NETEM_LOSS]) {
 919		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
 920		if (ret) {
 921			q->loss_model = old_loss_model;
 922			return ret;
 923		}
 924	} else {
 925		q->loss_model = CLG_RANDOM;
 926	}
 927
 928	if (tb[TCA_NETEM_DELAY_DIST]) {
 929		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
 930		if (ret) {
 931			/* recover clg and loss_model, in case of
 932			 * q->clg and q->loss_model were modified
 933			 * in get_loss_clg()
 934			 */
 935			q->clg = old_clg;
 936			q->loss_model = old_loss_model;
 937			return ret;
 938		}
 939	}
 940
 941	sch->limit = qopt->limit;
 942
 943	q->latency = PSCHED_TICKS2NS(qopt->latency);
 944	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
 945	q->limit = qopt->limit;
 946	q->gap = qopt->gap;
 947	q->counter = 0;
 948	q->loss = qopt->loss;
 949	q->duplicate = qopt->duplicate;
 950
 951	/* for compatibility with earlier versions.
 952	 * if gap is set, need to assume 100% probability
 953	 */
 954	if (q->gap)
 955		q->reorder = ~0;
 956
 957	if (tb[TCA_NETEM_CORR])
 958		get_correlation(q, tb[TCA_NETEM_CORR]);
 959
 960	if (tb[TCA_NETEM_REORDER])
 961		get_reorder(q, tb[TCA_NETEM_REORDER]);
 962
 963	if (tb[TCA_NETEM_CORRUPT])
 964		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
 965
 966	if (tb[TCA_NETEM_RATE])
 967		get_rate(q, tb[TCA_NETEM_RATE]);
 968
 969	if (tb[TCA_NETEM_RATE64])
 970		q->rate = max_t(u64, q->rate,
 971				nla_get_u64(tb[TCA_NETEM_RATE64]));
 972
 973	if (tb[TCA_NETEM_LATENCY64])
 974		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
 975
 976	if (tb[TCA_NETEM_JITTER64])
 977		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
 978
 979	if (tb[TCA_NETEM_ECN])
 980		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 981
 982	if (tb[TCA_NETEM_SLOT])
 983		get_slot(q, tb[TCA_NETEM_SLOT]);
 984
 985	return ret;
 986}
 987
 988static int netem_init(struct Qdisc *sch, struct nlattr *opt,
 989		      struct netlink_ext_ack *extack)
 990{
 991	struct netem_sched_data *q = qdisc_priv(sch);
 992	int ret;
 993
 994	qdisc_watchdog_init(&q->watchdog, sch);
 995
 996	if (!opt)
 997		return -EINVAL;
 998
 999	q->loss_model = CLG_RANDOM;
1000	ret = netem_change(sch, opt, extack);
1001	if (ret)
1002		pr_info("netem: change failed\n");
1003	return ret;
1004}
1005
1006static void netem_destroy(struct Qdisc *sch)
1007{
1008	struct netem_sched_data *q = qdisc_priv(sch);
1009
1010	qdisc_watchdog_cancel(&q->watchdog);
1011	if (q->qdisc)
1012		qdisc_destroy(q->qdisc);
1013	dist_free(q->delay_dist);
1014}
1015
1016static int dump_loss_model(const struct netem_sched_data *q,
1017			   struct sk_buff *skb)
1018{
1019	struct nlattr *nest;
1020
1021	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
1022	if (nest == NULL)
1023		goto nla_put_failure;
1024
1025	switch (q->loss_model) {
1026	case CLG_RANDOM:
1027		/* legacy loss model */
1028		nla_nest_cancel(skb, nest);
1029		return 0;	/* no data */
1030
1031	case CLG_4_STATES: {
1032		struct tc_netem_gimodel gi = {
1033			.p13 = q->clg.a1,
1034			.p31 = q->clg.a2,
1035			.p32 = q->clg.a3,
1036			.p14 = q->clg.a4,
1037			.p23 = q->clg.a5,
1038		};
1039
1040		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
1041			goto nla_put_failure;
1042		break;
1043	}
1044	case CLG_GILB_ELL: {
1045		struct tc_netem_gemodel ge = {
1046			.p = q->clg.a1,
1047			.r = q->clg.a2,
1048			.h = q->clg.a3,
1049			.k1 = q->clg.a4,
1050		};
1051
1052		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
1053			goto nla_put_failure;
1054		break;
1055	}
1056	}
1057
1058	nla_nest_end(skb, nest);
1059	return 0;
1060
1061nla_put_failure:
1062	nla_nest_cancel(skb, nest);
1063	return -1;
1064}
1065
1066static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
1067{
1068	const struct netem_sched_data *q = qdisc_priv(sch);
1069	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
1070	struct tc_netem_qopt qopt;
1071	struct tc_netem_corr cor;
1072	struct tc_netem_reorder reorder;
1073	struct tc_netem_corrupt corrupt;
1074	struct tc_netem_rate rate;
1075	struct tc_netem_slot slot;
1076
1077	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
1078			     UINT_MAX);
1079	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
1080			    UINT_MAX);
1081	qopt.limit = q->limit;
1082	qopt.loss = q->loss;
1083	qopt.gap = q->gap;
1084	qopt.duplicate = q->duplicate;
1085	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1086		goto nla_put_failure;
1087
1088	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
1089		goto nla_put_failure;
1090
1091	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
1092		goto nla_put_failure;
1093
1094	cor.delay_corr = q->delay_cor.rho;
1095	cor.loss_corr = q->loss_cor.rho;
1096	cor.dup_corr = q->dup_cor.rho;
1097	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1098		goto nla_put_failure;
1099
1100	reorder.probability = q->reorder;
1101	reorder.correlation = q->reorder_cor.rho;
1102	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1103		goto nla_put_failure;
1104
1105	corrupt.probability = q->corrupt;
1106	corrupt.correlation = q->corrupt_cor.rho;
1107	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1108		goto nla_put_failure;
1109
1110	if (q->rate >= (1ULL << 32)) {
1111		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
1112				      TCA_NETEM_PAD))
1113			goto nla_put_failure;
1114		rate.rate = ~0U;
1115	} else {
1116		rate.rate = q->rate;
1117	}
1118	rate.packet_overhead = q->packet_overhead;
1119	rate.cell_size = q->cell_size;
1120	rate.cell_overhead = q->cell_overhead;
1121	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1122		goto nla_put_failure;
1123
1124	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1125		goto nla_put_failure;
1126
1127	if (dump_loss_model(q, skb) != 0)
1128		goto nla_put_failure;
1129
1130	if (q->slot_config.min_delay | q->slot_config.max_delay) {
1131		slot = q->slot_config;
1132		if (slot.max_packets == INT_MAX)
1133			slot.max_packets = 0;
1134		if (slot.max_bytes == INT_MAX)
1135			slot.max_bytes = 0;
1136		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
1137			goto nla_put_failure;
1138	}
1139
1140	return nla_nest_end(skb, nla);
1141
1142nla_put_failure:
1143	nlmsg_trim(skb, nla);
1144	return -1;
1145}
1146
1147static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
1148			  struct sk_buff *skb, struct tcmsg *tcm)
1149{
1150	struct netem_sched_data *q = qdisc_priv(sch);
1151
1152	if (cl != 1 || !q->qdisc) 	/* only one class */
1153		return -ENOENT;
1154
1155	tcm->tcm_handle |= TC_H_MIN(1);
1156	tcm->tcm_info = q->qdisc->handle;
1157
1158	return 0;
1159}
1160
1161static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1162		     struct Qdisc **old, struct netlink_ext_ack *extack)
1163{
1164	struct netem_sched_data *q = qdisc_priv(sch);
1165
1166	*old = qdisc_replace(sch, new, &q->qdisc);
1167	return 0;
1168}
1169
1170static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
1171{
1172	struct netem_sched_data *q = qdisc_priv(sch);
1173	return q->qdisc;
1174}
1175
1176static unsigned long netem_find(struct Qdisc *sch, u32 classid)
1177{
1178	return 1;
1179}
1180
1181static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
1182{
1183	if (!walker->stop) {
1184		if (walker->count >= walker->skip)
1185			if (walker->fn(sch, 1, walker) < 0) {
1186				walker->stop = 1;
1187				return;
1188			}
1189		walker->count++;
1190	}
1191}
1192
1193static const struct Qdisc_class_ops netem_class_ops = {
1194	.graft		=	netem_graft,
1195	.leaf		=	netem_leaf,
1196	.find		=	netem_find,
1197	.walk		=	netem_walk,
1198	.dump		=	netem_dump_class,
1199};
1200
1201static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
1202	.id		=	"netem",
1203	.cl_ops		=	&netem_class_ops,
1204	.priv_size	=	sizeof(struct netem_sched_data),
1205	.enqueue	=	netem_enqueue,
1206	.dequeue	=	netem_dequeue,
1207	.peek		=	qdisc_peek_dequeued,
1208	.init		=	netem_init,
1209	.reset		=	netem_reset,
1210	.destroy	=	netem_destroy,
1211	.change		=	netem_change,
1212	.dump		=	netem_dump,
1213	.owner		=	THIS_MODULE,
1214};
1215
1216
1217static int __init netem_module_init(void)
1218{
1219	pr_info("netem: version " VERSION "\n");
1220	return register_qdisc(&netem_qdisc_ops);
1221}
1222static void __exit netem_module_exit(void)
1223{
1224	unregister_qdisc(&netem_qdisc_ops);
1225}
1226module_init(netem_module_init)
1227module_exit(netem_module_exit)
1228MODULE_LICENSE("GPL");
v3.1
  1/*
  2 * net/sched/sch_netem.c	Network emulator
  3 *
  4 * 		This program is free software; you can redistribute it and/or
  5 * 		modify it under the terms of the GNU General Public License
  6 * 		as published by the Free Software Foundation; either version
  7 * 		2 of the License.
  8 *
  9 *  		Many of the algorithms and ideas for this came from
 10 *		NIST Net which is not copyrighted.
 11 *
 12 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 13 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 14 */
 15
 16#include <linux/mm.h>
 17#include <linux/module.h>
 18#include <linux/slab.h>
 19#include <linux/types.h>
 20#include <linux/kernel.h>
 21#include <linux/errno.h>
 22#include <linux/skbuff.h>
 23#include <linux/vmalloc.h>
 24#include <linux/rtnetlink.h>
 25
 26#include <net/netlink.h>
 27#include <net/pkt_sched.h>
 28
 29#define VERSION "1.3"
 30
 31/*	Network Emulation Queuing algorithm.
 32	====================================
 33
 34	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
 35		 Network Emulation Tool"
 36		 [2] Luigi Rizzo, DummyNet for FreeBSD
 37
 38	 ----------------------------------------------------------------
 39
 40	 This started out as a simple way to delay outgoing packets to
 41	 test TCP but has grown to include most of the functionality
 42	 of a full blown network emulator like NISTnet. It can delay
 43	 packets and add random jitter (and correlation). The random
 44	 distribution can be loaded from a table as well to provide
 45	 normal, Pareto, or experimental curves. Packet loss,
 46	 duplication, and reordering can also be emulated.
 47
 48	 This qdisc does not do classification; that can be handled by
 49	 layering other disciplines.  It does not need to do bandwidth
 50	 control either since that can be handled by using token
 51	 bucket or other rate control.
 52
 53     Correlated Loss Generator models
 54
 55	Added generation of correlated loss according to the
 56	"Gilbert-Elliot" model and to a 4-state Markov model.
 57
 58	References:
 59	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
 60	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
 61	and intuitive loss model for packet networks and its implementation
 62	in the Netem module in the Linux kernel", available in [1]
 63
 64	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
 65		 Fabio Ludovici <fabio.ludovici at yahoo.it>
 66*/
 67
 68struct netem_sched_data {
 69	struct Qdisc	*qdisc;
 70	struct qdisc_watchdog watchdog;
 71
 72	psched_tdiff_t latency;
 73	psched_tdiff_t jitter;
 74
 75	u32 loss;
 76	u32 limit;
 77	u32 counter;
 78	u32 gap;
 79	u32 duplicate;
 80	u32 reorder;
 81	u32 corrupt;
 82
 83	struct crndstate {
 84		u32 last;
 85		u32 rho;
 86	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 87
 88	struct disttable {
 89		u32  size;
 90		s16 table[0];
 91	} *delay_dist;
 92
 93	enum  {
 94		CLG_RANDOM,
 95		CLG_4_STATES,
 96		CLG_GILB_ELL,
 97	} loss_model;
 98
 99	/* Correlated Loss Generation models */
100	struct clgstate {
101		/* state of the Markov chain */
102		u8 state;
103
104		/* 4-states and Gilbert-Elliot models */
105		u32 a1;	/* p13 for 4-states or p for GE */
106		u32 a2;	/* p31 for 4-states or r for GE */
107		u32 a3;	/* p32 for 4-states or h for GE */
108		u32 a4;	/* p14 for 4-states or 1-k for GE */
109		u32 a5; /* p23 used only in 4-states */
110	} clg;
111
112};
113
114/* Time stamp put into socket buffer control block */
115struct netem_skb_cb {
116	psched_time_t	time_to_send;
117};
118
119static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
120{
121	BUILD_BUG_ON(sizeof(skb->cb) <
122		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
123	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
124}
125
126/* init_crandom - initialize correlated random number generator
127 * Use entropy source for initial seed.
128 */
129static void init_crandom(struct crndstate *state, unsigned long rho)
130{
131	state->rho = rho;
132	state->last = net_random();
133}
134
135/* get_crandom - correlated random number generator
136 * Next number depends on last value.
137 * rho is scaled to avoid floating point.
138 */
139static u32 get_crandom(struct crndstate *state)
140{
141	u64 value, rho;
142	unsigned long answer;
143
144	if (state->rho == 0)	/* no correlation */
145		return net_random();
146
147	value = net_random();
148	rho = (u64)state->rho + 1;
149	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
150	state->last = answer;
151	return answer;
152}
153
154/* loss_4state - 4-state model loss generator
155 * Generates losses according to the 4-state Markov chain adopted in
156 * the GI (General and Intuitive) loss model.
157 */
158static bool loss_4state(struct netem_sched_data *q)
159{
160	struct clgstate *clg = &q->clg;
161	u32 rnd = net_random();
162
163	/*
164	 * Makes a comparison between rnd and the transition
165	 * probabilities outgoing from the current state, then decides the
166	 * next state and if the next packet has to be transmitted or lost.
167	 * The four states correspond to:
168	 *   1 => successfully transmitted packets within a gap period
169	 *   4 => isolated losses within a gap period
170	 *   3 => lost packets within a burst period
171	 *   2 => successfully transmitted packets within a burst period
172	 */
173	switch (clg->state) {
174	case 1:
175		if (rnd < clg->a4) {
176			clg->state = 4;
177			return true;
178		} else if (clg->a4 < rnd && rnd < clg->a1) {
179			clg->state = 3;
180			return true;
181		} else if (clg->a1 < rnd)
182			clg->state = 1;
183
184		break;
185	case 2:
186		if (rnd < clg->a5) {
187			clg->state = 3;
188			return true;
189		} else
190			clg->state = 2;
191
192		break;
193	case 3:
194		if (rnd < clg->a3)
195			clg->state = 2;
196		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
197			clg->state = 1;
198			return true;
199		} else if (clg->a2 + clg->a3 < rnd) {
200			clg->state = 3;
201			return true;
202		}
203		break;
204	case 4:
205		clg->state = 1;
206		break;
207	}
208
209	return false;
210}
211
212/* loss_gilb_ell - Gilbert-Elliot model loss generator
213 * Generates losses according to the Gilbert-Elliot loss model or
214 * its special cases  (Gilbert or Simple Gilbert)
215 *
216 * Makes a comparison between random number and the transition
217 * probabilities outgoing from the current state, then decides the
218 * next state. A second random number is extracted and the comparison
219 * with the loss probability of the current state decides if the next
220 * packet will be transmitted or lost.
221 */
222static bool loss_gilb_ell(struct netem_sched_data *q)
223{
224	struct clgstate *clg = &q->clg;
225
226	switch (clg->state) {
227	case 1:
228		if (net_random() < clg->a1)
229			clg->state = 2;
230		if (net_random() < clg->a4)
231			return true;
		break;
232	case 2:
233		if (net_random() < clg->a2)
234			clg->state = 1;
235		if (clg->a3 > net_random())
236			return true;
237	}
238
239	return false;
240}
241
242static bool loss_event(struct netem_sched_data *q)
243{
244	switch (q->loss_model) {
245	case CLG_RANDOM:
246		/* Random packet drop 0 => none, ~0 => all */
247		return q->loss && q->loss >= get_crandom(&q->loss_cor);
248
249	case CLG_4_STATES:
250		/* 4state loss model algorithm (used also for GI model)
251		* Extracts a value from the markov 4 state loss generator,
252		* if it is 1 drops a packet and if needed writes the event in
253		* the kernel logs
254		*/
255		return loss_4state(q);
256
257	case CLG_GILB_ELL:
258		/* Gilbert-Elliot loss model algorithm
259		* Extracts a value from the Gilbert-Elliot loss generator,
260		* if it is 1 drops a packet and if needed writes the event in
261		* the kernel logs
262		*/
263		return loss_gilb_ell(q);
264	}
265
266	return false;	/* not reached */
267}
268
269
270/* tabledist - return a pseudo-randomly distributed value with mean mu and
271 * std deviation sigma.  Uses table lookup to approximate the desired
272 * distribution, and a uniformly-distributed pseudo-random source.
273 */
274static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
275				struct crndstate *state,
276				const struct disttable *dist)
277{
278	psched_tdiff_t x;
279	long t;
280	u32 rnd;
281
282	if (sigma == 0)
283		return mu;
284
285	rnd = get_crandom(state);
286
287	/* default uniform distribution */
288	if (dist == NULL)
289		return (rnd % (2*sigma)) - sigma + mu;
290
291	t = dist->table[rnd % dist->size];
292	x = (sigma % NETEM_DIST_SCALE) * t;
293	if (x >= 0)
294		x += NETEM_DIST_SCALE/2;
295	else
296		x -= NETEM_DIST_SCALE/2;
297
298	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
299}
300
301/*
302 * Insert one skb into qdisc.
303 * Note: parent depends on return value to account for queue length.
304 * 	NET_XMIT_DROP: queue length didn't change.
305 *      NET_XMIT_SUCCESS: one skb was queued.
306 */
307static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
308{
309	struct netem_sched_data *q = qdisc_priv(sch);
310	/* We don't fill cb now as skb_unshare() may invalidate it */
311	struct netem_skb_cb *cb;
312	struct sk_buff *skb2;
313	int ret;
314	int count = 1;
315
316	/* Random duplication */
317	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
318		++count;
319
320	/* Drop packet? */
321	if (loss_event(q))
322		--count;
323
324	if (count == 0) {
325		sch->qstats.drops++;
326		kfree_skb(skb);
327		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
328	}
329
330	skb_orphan(skb);
331
332	/*
333	 * If we need to duplicate packet, then re-insert at top of the
334	 * qdisc tree, since parent queuer expects that only one
335	 * skb will be queued.
336	 */
337	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
338		struct Qdisc *rootq = qdisc_root(sch);
339		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
340		q->duplicate = 0;
341
342		qdisc_enqueue_root(skb2, rootq);
343		q->duplicate = dupsave;
344	}
345
346	/*
347	 * Randomized packet corruption.
348	 * Make a copy if needed since we are modifying the data.
349	 * If the packet is going to be hardware checksummed, then
350	 * do it now in software before we mangle it.
351	 */
352	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
353		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
354		    (skb->ip_summed == CHECKSUM_PARTIAL &&
355		     skb_checksum_help(skb))) {
356			sch->qstats.drops++;
357			return NET_XMIT_DROP;
358		}
359
360		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
361	}
362
363	cb = netem_skb_cb(skb);
364	if (q->gap == 0 ||		/* not doing reordering */
365	    q->counter < q->gap ||	/* inside last reordering gap */
366	    q->reorder < get_crandom(&q->reorder_cor)) {
367		psched_time_t now;
368		psched_tdiff_t delay;
369
370		delay = tabledist(q->latency, q->jitter,
371				  &q->delay_cor, q->delay_dist);
372
373		now = psched_get_time();
374		cb->time_to_send = now + delay;
375		++q->counter;
376		ret = qdisc_enqueue(skb, q->qdisc);
377	} else {
378		/*
379		 * Do re-ordering by putting one out of N packets at the front
380		 * of the queue.
381		 */
382		cb->time_to_send = psched_get_time();
383		q->counter = 0;
384
385		__skb_queue_head(&q->qdisc->q, skb);
386		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
387		q->qdisc->qstats.requeues++;
388		ret = NET_XMIT_SUCCESS;
389	}
390
391	if (ret != NET_XMIT_SUCCESS) {
392		if (net_xmit_drop_count(ret)) {
393			sch->qstats.drops++;
394			return ret;
395		}
396	}
397
398	sch->q.qlen++;
399	return NET_XMIT_SUCCESS;
400}
401
402static unsigned int netem_drop(struct Qdisc *sch)
403{
404	struct netem_sched_data *q = qdisc_priv(sch);
405	unsigned int len = 0;
406
407	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
408		sch->q.qlen--;
409		sch->qstats.drops++;
410	}
411	return len;
412}
413
414static struct sk_buff *netem_dequeue(struct Qdisc *sch)
415{
416	struct netem_sched_data *q = qdisc_priv(sch);
417	struct sk_buff *skb;
418
419	if (qdisc_is_throttled(sch))
420		return NULL;
421
422	skb = q->qdisc->ops->peek(q->qdisc);
423	if (skb) {
424		const struct netem_skb_cb *cb = netem_skb_cb(skb);
425		psched_time_t now = psched_get_time();
426
427		/* is there more time remaining? */
428		if (cb->time_to_send <= now) {
429			skb = qdisc_dequeue_peeked(q->qdisc);
430			if (unlikely(!skb))
431				return NULL;
432
433#ifdef CONFIG_NET_CLS_ACT
434			/*
435			 * If it's at ingress let's pretend the delay is
436			 * from the network (tstamp will be updated).
437			 */
438			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
439				skb->tstamp.tv64 = 0;
440#endif
441
442			sch->q.qlen--;
443			qdisc_unthrottled(sch);
444			qdisc_bstats_update(sch, skb);
445			return skb;
446		}
447
448		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
449	}
450
451	return NULL;
452}
453
454static void netem_reset(struct Qdisc *sch)
455{
456	struct netem_sched_data *q = qdisc_priv(sch);
457
458	qdisc_reset(q->qdisc);
459	sch->q.qlen = 0;
460	qdisc_watchdog_cancel(&q->watchdog);
461}
462
463static void dist_free(struct disttable *d)
464{
465	if (d) {
466		if (is_vmalloc_addr(d))
467			vfree(d);
468		else
469			kfree(d);
470	}
471}
472
473/*
474 * Distribution data is a variable size payload containing
475 * signed 16 bit values.
476 */
477static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
478{
479	struct netem_sched_data *q = qdisc_priv(sch);
480	size_t n = nla_len(attr)/sizeof(__s16);
481	const __s16 *data = nla_data(attr);
482	spinlock_t *root_lock;
483	struct disttable *d;
484	int i;
485	size_t s;
486
487	if (n > NETEM_DIST_MAX)
488		return -EINVAL;
489
490	s = sizeof(struct disttable) + n * sizeof(s16);
491	d = kmalloc(s, GFP_KERNEL);
492	if (!d)
493		d = vmalloc(s);
494	if (!d)
495		return -ENOMEM;
496
497	d->size = n;
498	for (i = 0; i < n; i++)
499		d->table[i] = data[i];
500
501	root_lock = qdisc_root_sleeping_lock(sch);
502
503	spin_lock_bh(root_lock);
504	dist_free(q->delay_dist);
505	q->delay_dist = d;
506	spin_unlock_bh(root_lock);
507	return 0;
508}
509
510static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
511{
512	struct netem_sched_data *q = qdisc_priv(sch);
513	const struct tc_netem_corr *c = nla_data(attr);
514
515	init_crandom(&q->delay_cor, c->delay_corr);
516	init_crandom(&q->loss_cor, c->loss_corr);
517	init_crandom(&q->dup_cor, c->dup_corr);
518}
519
520static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
521{
522	struct netem_sched_data *q = qdisc_priv(sch);
523	const struct tc_netem_reorder *r = nla_data(attr);
524
525	q->reorder = r->probability;
526	init_crandom(&q->reorder_cor, r->correlation);
527}
528
529static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
530{
531	struct netem_sched_data *q = qdisc_priv(sch);
532	const struct tc_netem_corrupt *r = nla_data(attr);
533
534	q->corrupt = r->probability;
535	init_crandom(&q->corrupt_cor, r->correlation);
536}
537
538static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
539{
540	struct netem_sched_data *q = qdisc_priv(sch);
541	const struct nlattr *la;
542	int rem;
543
544	nla_for_each_nested(la, attr, rem) {
545		u16 type = nla_type(la);
546
547		switch(type) {
548		case NETEM_LOSS_GI: {
549			const struct tc_netem_gimodel *gi = nla_data(la);
550
551			if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
552				pr_info("netem: incorrect gi model size\n");
553				return -EINVAL;
554			}
555
556			q->loss_model = CLG_4_STATES;
557
558			q->clg.state = 1;
559			q->clg.a1 = gi->p13;
560			q->clg.a2 = gi->p31;
561			q->clg.a3 = gi->p32;
562			q->clg.a4 = gi->p14;
563			q->clg.a5 = gi->p23;
564			break;
565		}
566
567		case NETEM_LOSS_GE: {
568			const struct tc_netem_gemodel *ge = nla_data(la);
569
570			if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
571				pr_info("netem: incorrect ge model size\n");
572				return -EINVAL;
573			}
574
575			q->loss_model = CLG_GILB_ELL;
576			q->clg.state = 1;
577			q->clg.a1 = ge->p;
578			q->clg.a2 = ge->r;
579			q->clg.a3 = ge->h;
580			q->clg.a4 = ge->k1;
581			break;
582		}
583
584		default:
585			pr_info("netem: unknown loss type %u\n", type);
586			return -EINVAL;
587		}
588	}
589
590	return 0;
591}
592
593static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
594	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
595	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
596	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
597	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
598};
599
600static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
601		      const struct nla_policy *policy, int len)
602{
603	int nested_len = nla_len(nla) - NLA_ALIGN(len);
604
605	if (nested_len < 0) {
606		pr_info("netem: invalid attributes len %d\n", nested_len);
607		return -EINVAL;
608	}
609
610	if (nested_len >= nla_attr_size(0))
611		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
612				 nested_len, policy);
613
614	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
615	return 0;
616}
617
618/* Parse netlink message to set options */
619static int netem_change(struct Qdisc *sch, struct nlattr *opt)
620{
621	struct netem_sched_data *q = qdisc_priv(sch);
622	struct nlattr *tb[TCA_NETEM_MAX + 1];
623	struct tc_netem_qopt *qopt;
624	int ret;
625
626	if (opt == NULL)
627		return -EINVAL;
628
629	qopt = nla_data(opt);
630	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
631	if (ret < 0)
632		return ret;
633
634	ret = fifo_set_limit(q->qdisc, qopt->limit);
635	if (ret) {
636		pr_info("netem: can't set fifo limit\n");
637		return ret;
638	}
639
640	q->latency = qopt->latency;
641	q->jitter = qopt->jitter;
642	q->limit = qopt->limit;
643	q->gap = qopt->gap;
644	q->counter = 0;
645	q->loss = qopt->loss;
646	q->duplicate = qopt->duplicate;
647
648	/* for compatibility with earlier versions.
649	 * if gap is set, need to assume 100% probability
650	 */
651	if (q->gap)
652		q->reorder = ~0;
653
654	if (tb[TCA_NETEM_CORR])
655		get_correlation(sch, tb[TCA_NETEM_CORR]);
656
657	if (tb[TCA_NETEM_DELAY_DIST]) {
658		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
659		if (ret)
660			return ret;
661	}
662
663	if (tb[TCA_NETEM_REORDER])
664		get_reorder(sch, tb[TCA_NETEM_REORDER]);
665
666	if (tb[TCA_NETEM_CORRUPT])
667		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
668
669	q->loss_model = CLG_RANDOM;
670	if (tb[TCA_NETEM_LOSS])
671		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
672
673	return ret;
674}
675
676/*
677 * Special case version of FIFO queue for use by netem.
678 * It queues in order based on timestamps in skb's
679 */
680struct fifo_sched_data {
681	u32 limit;
682	psched_time_t oldest;
683};
684
685static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
686{
687	struct fifo_sched_data *q = qdisc_priv(sch);
688	struct sk_buff_head *list = &sch->q;
689	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
690	struct sk_buff *skb;
691
692	if (likely(skb_queue_len(list) < q->limit)) {
693		/* Optimize for add at tail */
694		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
695			q->oldest = tnext;
696			return qdisc_enqueue_tail(nskb, sch);
697		}
698
699		skb_queue_reverse_walk(list, skb) {
700			const struct netem_skb_cb *cb = netem_skb_cb(skb);
701
702			if (tnext >= cb->time_to_send)
703				break;
704		}
705
706		__skb_queue_after(list, skb, nskb);
707
708		sch->qstats.backlog += qdisc_pkt_len(nskb);
709
710		return NET_XMIT_SUCCESS;
711	}
712
713	return qdisc_reshape_fail(nskb, sch);
714}
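/* In this version the tfifo is still a linked list walked in reverse
 * to find the insertion point, O(n) per packet; later kernels (see the
 * v4.17 listing above) replaced it with an rbtree.
 */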
715
716static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
717{
718	struct fifo_sched_data *q = qdisc_priv(sch);
719
720	if (opt) {
721		struct tc_fifo_qopt *ctl = nla_data(opt);
722		if (nla_len(opt) < sizeof(*ctl))
723			return -EINVAL;
724
725		q->limit = ctl->limit;
726	} else
727		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
728
729	q->oldest = PSCHED_PASTPERFECT;
730	return 0;
731}
732
733static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
734{
735	struct fifo_sched_data *q = qdisc_priv(sch);
736	struct tc_fifo_qopt opt = { .limit = q->limit };
737
738	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
739	return skb->len;
740
741nla_put_failure:
742	return -1;
743}
744
745static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
746	.id		=	"tfifo",
747	.priv_size	=	sizeof(struct fifo_sched_data),
748	.enqueue	=	tfifo_enqueue,
749	.dequeue	=	qdisc_dequeue_head,
750	.peek		=	qdisc_peek_head,
751	.drop		=	qdisc_queue_drop,
752	.init		=	tfifo_init,
753	.reset		=	qdisc_reset_queue,
754	.change		=	tfifo_init,
755	.dump		=	tfifo_dump,
756};
757
758static int netem_init(struct Qdisc *sch, struct nlattr *opt)
759{
760	struct netem_sched_data *q = qdisc_priv(sch);
761	int ret;
762
763	if (!opt)
764		return -EINVAL;
765
766	qdisc_watchdog_init(&q->watchdog, sch);
767
768	q->loss_model = CLG_RANDOM;
769	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
770				     TC_H_MAKE(sch->handle, 1));
771	if (!q->qdisc) {
772		pr_notice("netem: qdisc create tfifo qdisc failed\n");
773		return -ENOMEM;
774	}
775
776	ret = netem_change(sch, opt);
777	if (ret) {
778		pr_info("netem: change failed\n");
779		qdisc_destroy(q->qdisc);
780	}
781	return ret;
782}
783
784static void netem_destroy(struct Qdisc *sch)
785{
786	struct netem_sched_data *q = qdisc_priv(sch);
787
788	qdisc_watchdog_cancel(&q->watchdog);
789	qdisc_destroy(q->qdisc);
790	dist_free(q->delay_dist);
791}
792
793static int dump_loss_model(const struct netem_sched_data *q,
794			   struct sk_buff *skb)
795{
796	struct nlattr *nest;
797
798	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
799	if (nest == NULL)
800		goto nla_put_failure;
801
802	switch (q->loss_model) {
803	case CLG_RANDOM:
804		/* legacy loss model */
805		nla_nest_cancel(skb, nest);
806		return 0;	/* no data */
807
808	case CLG_4_STATES: {
809		struct tc_netem_gimodel gi = {
810			.p13 = q->clg.a1,
811			.p31 = q->clg.a2,
812			.p32 = q->clg.a3,
813			.p14 = q->clg.a4,
814			.p23 = q->clg.a5,
815		};
816
817		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
818		break;
819	}
820	case CLG_GILB_ELL: {
821		struct tc_netem_gemodel ge = {
822			.p = q->clg.a1,
823			.r = q->clg.a2,
824			.h = q->clg.a3,
825			.k1 = q->clg.a4,
826		};
827
828		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
829		break;
830	}
831	}
832
833	nla_nest_end(skb, nest);
834	return 0;
835
836nla_put_failure:
837	nla_nest_cancel(skb, nest);
838	return -1;
839}
840
841static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
842{
843	const struct netem_sched_data *q = qdisc_priv(sch);
844	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
845	struct tc_netem_qopt qopt;
846	struct tc_netem_corr cor;
847	struct tc_netem_reorder reorder;
848	struct tc_netem_corrupt corrupt;
849
850	qopt.latency = q->latency;
851	qopt.jitter = q->jitter;
852	qopt.limit = q->limit;
853	qopt.loss = q->loss;
854	qopt.gap = q->gap;
855	qopt.duplicate = q->duplicate;
856	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
857
858	cor.delay_corr = q->delay_cor.rho;
859	cor.loss_corr = q->loss_cor.rho;
860	cor.dup_corr = q->dup_cor.rho;
861	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
862
863	reorder.probability = q->reorder;
864	reorder.correlation = q->reorder_cor.rho;
865	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
866
867	corrupt.probability = q->corrupt;
868	corrupt.correlation = q->corrupt_cor.rho;
869	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
870
871	if (dump_loss_model(q, skb) != 0)
872		goto nla_put_failure;
873
874	return nla_nest_end(skb, nla);
875
876nla_put_failure:
877	nlmsg_trim(skb, nla);
878	return -1;
879}
880
881static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
882			  struct sk_buff *skb, struct tcmsg *tcm)
883{
884	struct netem_sched_data *q = qdisc_priv(sch);
885
886	if (cl != 1) 	/* only one class */
887		return -ENOENT;
888
889	tcm->tcm_handle |= TC_H_MIN(1);
890	tcm->tcm_info = q->qdisc->handle;
891
892	return 0;
893}
894
895static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
896		     struct Qdisc **old)
897{
898	struct netem_sched_data *q = qdisc_priv(sch);
899
900	if (new == NULL)
901		new = &noop_qdisc;
902
903	sch_tree_lock(sch);
904	*old = q->qdisc;
905	q->qdisc = new;
906	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
907	qdisc_reset(*old);
908	sch_tree_unlock(sch);
909
910	return 0;
911}
912
913static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
914{
915	struct netem_sched_data *q = qdisc_priv(sch);
916	return q->qdisc;
917}
918
919static unsigned long netem_get(struct Qdisc *sch, u32 classid)
920{
921	return 1;
922}
923
924static void netem_put(struct Qdisc *sch, unsigned long arg)
925{
926}
927
928static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
929{
930	if (!walker->stop) {
931		if (walker->count >= walker->skip)
932			if (walker->fn(sch, 1, walker) < 0) {
933				walker->stop = 1;
934				return;
935			}
936		walker->count++;
937	}
938}
939
940static const struct Qdisc_class_ops netem_class_ops = {
941	.graft		=	netem_graft,
942	.leaf		=	netem_leaf,
943	.get		=	netem_get,
944	.put		=	netem_put,
945	.walk		=	netem_walk,
946	.dump		=	netem_dump_class,
947};
948
949static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
950	.id		=	"netem",
951	.cl_ops		=	&netem_class_ops,
952	.priv_size	=	sizeof(struct netem_sched_data),
953	.enqueue	=	netem_enqueue,
954	.dequeue	=	netem_dequeue,
955	.peek		=	qdisc_peek_dequeued,
956	.drop		=	netem_drop,
957	.init		=	netem_init,
958	.reset		=	netem_reset,
959	.destroy	=	netem_destroy,
960	.change		=	netem_change,
961	.dump		=	netem_dump,
962	.owner		=	THIS_MODULE,
963};
964
965
966static int __init netem_module_init(void)
967{
968	pr_info("netem: version " VERSION "\n");
969	return register_qdisc(&netem_qdisc_ops);
970}
971static void __exit netem_module_exit(void)
972{
973	unregister_qdisc(&netem_qdisc_ops);
974}
975module_init(netem_module_init)
976module_exit(netem_module_exit)
977MODULE_LICENSE("GPL");