v3.5.6
   1/*
   2 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Martin Devera, <devik@cdi.cz>
  10 *
  11 * Credits (in time order) for older HTB versions:
  12 *              Stef Coene <stef.coene@docum.org>
  13 *			HTB support at LARTC mailing list
  14 *		Ondrej Kraus, <krauso@barr.cz>
  15 *			found missing INIT_QDISC(htb)
  16 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
  17 *			helped a lot to locate nasty class stall bug
  18 *		Andi Kleen, Jamal Hadi, Bert Hubert
  19 *			code review and helpful comments on shaping
  20 *		Tomasz Wrona, <tw@eter.tym.pl>
  21 *			created test case so that I was able to fix nasty bug
  22 *		Wilfried Weissmann
  23 *			spotted bug in dequeue code and helped with fix
  24 *		Jiri Fojtasek
  25 *			fixed requeue routine
  26 *		and many others. thanks.
  27 */
  28#include <linux/module.h>
  29#include <linux/moduleparam.h>
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/string.h>
  33#include <linux/errno.h>
  34#include <linux/skbuff.h>
  35#include <linux/list.h>
  36#include <linux/compiler.h>
  37#include <linux/rbtree.h>
  38#include <linux/workqueue.h>
  39#include <linux/slab.h>
  40#include <net/netlink.h>
  41#include <net/pkt_sched.h>
  42
  43/* HTB algorithm.
  44    Author: devik@cdi.cz
  45    ========================================================================
  46    HTB is like TBF with multiple classes. It is also similar to CBQ because
  47    it allows assigning a priority to each class in the hierarchy.
  48    In fact it is another implementation of Floyd's formal sharing.
  49
  50    Levels:
  51    Each class is assigned a level. A leaf ALWAYS has level 0 and root
  52    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
  53    one less than their parent.
  54*/
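/* Editor's sketch (not part of the original file): a minimal HTB setup,
 * assuming a hypothetical device eth0, that yields the hierarchy this
 * file implements - 1:1 is an interior node, 1:10 and 1:20 are leaves
 * (level 0), and unclassified traffic falls back to the default leaf:
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit ceil 1mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * Leaves may borrow from 1:1 up to their ceil while it has spare tokens.
 */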
  55
  56static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
  57#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
  58
  59#if HTB_VER >> 16 != TC_HTB_PROTOVER
  60#error "Mismatched sch_htb.c and pkt_sch.h"
  61#endif
  62
  63/* Module parameter and sysfs export */
  64module_param    (htb_hysteresis, int, 0640);
  65MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
  66
  67/* used internally to keep the status of a single class */
  68enum htb_cmode {
  69	HTB_CANT_SEND,		/* class can't send and can't borrow */
  70	HTB_MAY_BORROW,		/* class can't send but may borrow */
  71	HTB_CAN_SEND		/* class can send */
  72};
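/* Editor's note (a sketch derived from htb_class_mode() below, with
 * hysteresis disabled): the mode is a pure function of the two buckets -
 *   ctokens < 0             -> HTB_CANT_SEND  (over ceil, must wait)
 *   tokens < 0 <= ctokens   -> HTB_MAY_BORROW (over rate, under ceil)
 *   0 <= tokens             -> HTB_CAN_SEND   (within rate)
 */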
  73
  74/* interior & leaf nodes; props specific to leaves are marked L: */
  75struct htb_class {
  76	struct Qdisc_class_common common;
  77	/* general class parameters */
  78	struct gnet_stats_basic_packed bstats;
  79	struct gnet_stats_queue qstats;
  80	struct gnet_stats_rate_est rate_est;
  81	struct tc_htb_xstats xstats;	/* our special stats */
  82	int refcnt;		/* usage count of this class */
  83
  84	/* topology */
  85	int level;		/* our level (see above) */
  86	unsigned int children;
  87	struct htb_class *parent;	/* parent class */
  88
  89	int prio;		/* these two are used only by leaves... */
  90	int quantum;		/* but stored for parent-to-leaf return */
  91
  92	union {
  93		struct htb_class_leaf {
  94			struct Qdisc *q;
  95			int deficit[TC_HTB_MAXDEPTH];
  96			struct list_head drop_list;
  97		} leaf;
  98		struct htb_class_inner {
  99			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
 100			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
 101			/* When class changes from state 1->2 and disconnects from
 102			 * parent's feed then we lost ptr value and start from the
 103			 * first child again. Here we store classid of the
 104			 * last valid ptr (used when ptr is NULL).
 105			 */
 106			u32 last_ptr_id[TC_HTB_NUMPRIO];
 107		} inner;
 108	} un;
 109	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 110	struct rb_node pq_node;	/* node for event queue */
 111	psched_time_t pq_key;
 112
 113	int prio_activity;	/* for which prios are we active */
 114	enum htb_cmode cmode;	/* current mode of the class */
 115
 116	/* class attached filters */
 117	struct tcf_proto *filter_list;
 118	int filter_cnt;
 119
 120	/* token bucket parameters */
 121	struct qdisc_rate_table *rate;	/* rate table of the class itself */
 122	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
 123	long buffer, cbuffer;	/* token bucket depth/rate */
 124	psched_tdiff_t mbuffer;	/* max wait time */
 125	long tokens, ctokens;	/* current number of tokens */
 126	psched_time_t t_c;	/* checkpoint time */
 127};
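/* Editor's note (sketch): the leaf/inner union above is safe because a
 * class is either a leaf (level 0, owning a child qdisc) or an interior
 * node (owning per-prio feed trees), never both at once;
 * htb_change_class() converts a leaf into an inner node when it gains a
 * child, and htb_parent_to_leaf() performs the reverse conversion.
 */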
 128
 129struct htb_sched {
 130	struct Qdisc_class_hash clhash;
 131	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 132
 133	/* self list - roots of self generating tree */
 134	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 135	int row_mask[TC_HTB_MAXDEPTH];
 136	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 137	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 138
 139	/* self wait list - roots of wait PQs per row */
 140	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 141
 142	/* time of nearest event per level (row) */
 143	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
 144
 145	int defcls;		/* class where unclassified flows go to */
 146
 147	/* filters for qdisc itself */
 148	struct tcf_proto *filter_list;
 149
 150	int rate2quantum;	/* quant = rate / rate2quantum */
 151	psched_time_t now;	/* cached dequeue time */
 152	struct qdisc_watchdog watchdog;
 153
 154	/* non shaped skbs; let them go directly thru */
 155	struct sk_buff_head direct_queue;
 156	int direct_qlen;	/* max qlen of above */
 157
 158	long direct_pkts;
 159
 160#define HTB_WARN_TOOMANYEVENTS	0x1
 161	unsigned int warned;	/* only one warning */
 162	struct work_struct work;
 163};
 164
 165/* find class in global hash table using given handle */
 166static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 167{
 168	struct htb_sched *q = qdisc_priv(sch);
 169	struct Qdisc_class_common *clc;
 170
 171	clc = qdisc_class_find(&q->clhash, handle);
 172	if (clc == NULL)
 173		return NULL;
 174	return container_of(clc, struct htb_class, common);
 175}
 176
 177/**
 178 * htb_classify - classify a packet into class
 179 *
 180 * It returns NULL if the packet should be dropped or -1 if the packet
 181 * should be passed directly thru. In all other cases a leaf class is returned.
 182 * We allow direct class selection by classid in priority. Then we examine
 183 * filters in qdisc and in inner nodes (if higher filter points to the inner
 184 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 185 * internal fifo (direct). These packets then go directly thru. If we still
 186 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful,
 187 * we finish and return the direct queue.
 188 */
 189#define HTB_DIRECT ((struct htb_class *)-1L)
 190
 191static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 192				      int *qerr)
 193{
 194	struct htb_sched *q = qdisc_priv(sch);
 195	struct htb_class *cl;
 196	struct tcf_result res;
 197	struct tcf_proto *tcf;
 198	int result;
 199
 200	/* allow selecting the class by setting skb->priority to a valid classid;
 201	 * note that nfmark can be used too by attaching filter fw with no
 202	 * rules in it
 203	 */
 204	if (skb->priority == sch->handle)
 205		return HTB_DIRECT;	/* X:0 (direct flow) selected */
 206	cl = htb_find(skb->priority, sch);
 207	if (cl && cl->level == 0)
 208		return cl;
 209
 210	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 211	tcf = q->filter_list;
 212	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 213#ifdef CONFIG_NET_CLS_ACT
 214		switch (result) {
 215		case TC_ACT_QUEUED:
 216		case TC_ACT_STOLEN:
 217			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 218		case TC_ACT_SHOT:
 219			return NULL;
 220		}
 221#endif
 222		cl = (void *)res.class;
 223		if (!cl) {
 224			if (res.classid == sch->handle)
 225				return HTB_DIRECT;	/* X:0 (direct flow) */
 226			cl = htb_find(res.classid, sch);
 227			if (!cl)
 228				break;	/* filter selected invalid classid */
 229		}
 230		if (!cl->level)
 231			return cl;	/* we hit leaf; return it */
 232
 233		/* we have got inner class; apply inner filter chain */
 234		tcf = cl->filter_list;
 235	}
 236	/* classification failed; try to use default class */
 237	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
 238	if (!cl || cl->level)
 239		return HTB_DIRECT;	/* bad default .. this is safe bet */
 240	return cl;
 241}
 242
 243/**
 244 * htb_add_to_id_tree - adds class to the round robin list
 245 *
 246 * The routine adds the class to the list (actually a tree) sorted by classid.
 247 * Make sure that the class is not already on such a list for the given prio.
 248 */
 249static void htb_add_to_id_tree(struct rb_root *root,
 250			       struct htb_class *cl, int prio)
 251{
 252	struct rb_node **p = &root->rb_node, *parent = NULL;
 253
 254	while (*p) {
 255		struct htb_class *c;
 256		parent = *p;
 257		c = rb_entry(parent, struct htb_class, node[prio]);
 258
 259		if (cl->common.classid > c->common.classid)
 260			p = &parent->rb_right;
 261		else
 262			p = &parent->rb_left;
 263	}
 264	rb_link_node(&cl->node[prio], parent, p);
 265	rb_insert_color(&cl->node[prio], root);
 266}
 267
 268/**
 269 * htb_add_to_wait_tree - adds class to the event queue with delay
 270 *
 271 * The class is added to the priority event queue to indicate that the class
 272 * will change its mode in cl->pq_key microseconds. Make sure that the class
 273 * is not already in the queue.
 274 */
 275static void htb_add_to_wait_tree(struct htb_sched *q,
 276				 struct htb_class *cl, long delay)
 277{
 278	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 279
 280	cl->pq_key = q->now + delay;
 281	if (cl->pq_key == q->now)
 282		cl->pq_key++;
 283
 284	/* update the nearest event cache */
 285	if (q->near_ev_cache[cl->level] > cl->pq_key)
 286		q->near_ev_cache[cl->level] = cl->pq_key;
 287
 288	while (*p) {
 289		struct htb_class *c;
 290		parent = *p;
 291		c = rb_entry(parent, struct htb_class, pq_node);
 292		if (cl->pq_key >= c->pq_key)
 293			p = &parent->rb_right;
 294		else
 295			p = &parent->rb_left;
 296	}
 297	rb_link_node(&cl->pq_node, parent, p);
 298	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
 299}
 300
 301/**
 302 * htb_next_rb_node - finds next node in binary tree
 303 *
 304 * When we are past last key we return NULL.
 305 * Average complexity is 2 steps per call.
 306 */
 307static inline void htb_next_rb_node(struct rb_node **n)
 308{
 309	*n = rb_next(*n);
 310}
 311
 312/**
 313 * htb_add_class_to_row - add class to its row
 314 *
 315 * The class is added to row at priorities marked in mask.
 316 * It does nothing if mask == 0.
 317 */
 318static inline void htb_add_class_to_row(struct htb_sched *q,
 319					struct htb_class *cl, int mask)
 320{
 321	q->row_mask[cl->level] |= mask;
 322	while (mask) {
 323		int prio = ffz(~mask);
 324		mask &= ~(1 << prio);
 325		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
 326	}
 327}
 328
 329/* If this triggers, it is a bug in this code, but it need not be fatal */
 330static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
 331{
 332	if (RB_EMPTY_NODE(rb)) {
 333		WARN_ON(1);
 334	} else {
 335		rb_erase(rb, root);
 336		RB_CLEAR_NODE(rb);
 337	}
 338}
 339
 340
 341/**
 342 * htb_remove_class_from_row - removes class from its row
 343 *
 344 * The class is removed from row at priorities marked in mask.
 345 * It does nothing if mask == 0.
 346 */
 347static inline void htb_remove_class_from_row(struct htb_sched *q,
 348						 struct htb_class *cl, int mask)
 349{
 350	int m = 0;
 351
 352	while (mask) {
 353		int prio = ffz(~mask);
 354
 355		mask &= ~(1 << prio);
 356		if (q->ptr[cl->level][prio] == cl->node + prio)
 357			htb_next_rb_node(q->ptr[cl->level] + prio);
 358
 359		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
 360		if (!q->row[cl->level][prio].rb_node)
 361			m |= 1 << prio;
 362	}
 363	q->row_mask[cl->level] &= ~m;
 364}
 365
 366/**
 367 * htb_activate_prios - creates an active class's feed chain
 368 *
 369 * The class is connected to ancestors and/or appropriate rows
 370 * for the priorities it participates in. cl->cmode must be the new
 371 * (activated) mode. It does nothing if cl->prio_activity == 0.
 372 */
 373static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 374{
 375	struct htb_class *p = cl->parent;
 376	long m, mask = cl->prio_activity;
 377
 378	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 379		m = mask;
 380		while (m) {
 381			int prio = ffz(~m);
 382			m &= ~(1 << prio);
 383
 384			if (p->un.inner.feed[prio].rb_node)
 385				/* parent already has its feed in use, so
 386				 * reset the bit in mask as the parent is already ok
 387				 */
 388				mask &= ~(1 << prio);
 389
 390			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
 391		}
 392		p->prio_activity |= mask;
 393		cl = p;
 394		p = cl->parent;
 395
 396	}
 397	if (cl->cmode == HTB_CAN_SEND && mask)
 398		htb_add_class_to_row(q, cl, mask);
 399}
 400
 401/**
 402 * htb_deactivate_prios - remove class from feed chain
 403 *
 404 * cl->cmode must represent old mode (before deactivation). It does
 405 * nothing if cl->prio_activity == 0. Class is removed from all feed
 406 * chains and rows.
 407 */
 408static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 409{
 410	struct htb_class *p = cl->parent;
 411	long m, mask = cl->prio_activity;
 412
 413	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 414		m = mask;
 415		mask = 0;
 416		while (m) {
 417			int prio = ffz(~m);
 418			m &= ~(1 << prio);
 419
 420			if (p->un.inner.ptr[prio] == cl->node + prio) {
 421				/* we are removing a child which the parent
 422				 * feed points to - forget the pointer but
 423				 * remember the classid
 424				 */
 425				p->un.inner.last_ptr_id[prio] = cl->common.classid;
 426				p->un.inner.ptr[prio] = NULL;
 427			}
 428
 429			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
 430
 431			if (!p->un.inner.feed[prio].rb_node)
 432				mask |= 1 << prio;
 433		}
 434
 435		p->prio_activity &= ~mask;
 436		cl = p;
 437		p = cl->parent;
 438
 439	}
 440	if (cl->cmode == HTB_CAN_SEND && mask)
 441		htb_remove_class_from_row(q, cl, mask);
 442}
 443
 444static inline long htb_lowater(const struct htb_class *cl)
 445{
 446	if (htb_hysteresis)
 447		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 448	else
 449		return 0;
 450}
 451static inline long htb_hiwater(const struct htb_class *cl)
 452{
 453	if (htb_hysteresis)
 454		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
 455	else
 456		return 0;
 457}
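/* Editor's worked example of the hysteresis (htb_hysteresis set): a class
 * in HTB_CAN_SEND keeps that mode until tokens sink below -cl->buffer,
 * but regains it only once tokens >= 0 again; likewise it enters
 * HTB_CANT_SEND only when ctokens < -cl->cbuffer yet leaves it as soon
 * as ctokens >= 0. The dead band suppresses mode flapping.
 */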
 458
 459
 460/**
 461 * htb_class_mode - computes and returns current class mode
 462 *
 463 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 464 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 465 * from now to time when cl will change its state.
 466 * Also it is worth noting that the class mode doesn't change simply
 467 * at cl->{c,}tokens == 0 but rather there can be a hysteresis in the
 468 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 469 * mode transitions per time unit. The speed gain is about 1/6.
 470 */
 471static inline enum htb_cmode
 472htb_class_mode(struct htb_class *cl, long *diff)
 473{
 474	long toks;
 475
 476	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 477		*diff = -toks;
 478		return HTB_CANT_SEND;
 479	}
 480
 481	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
 482		return HTB_CAN_SEND;
 483
 484	*diff = -toks;
 485	return HTB_MAY_BORROW;
 486}
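/* Editor's worked example (hysteresis off, so both watermarks are 0):
 * with cl->ctokens + *diff = 5 and cl->tokens + *diff = -3, the first
 * test fails (5 >= 0), the second fails too (-3 < 0), so the function
 * returns HTB_MAY_BORROW and sets *diff = 3 - the shortfall the class
 * must wait out before it may send at its own rate again.
 */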
 487
 488/**
 489 * htb_change_class_mode - changes class's mode
 490 *
 491 * This should be the only way to change a class's mode under normal
 492 * circumstances. The routine will update feed list linkage, change mode
 493 * and add class to the wait event queue if appropriate. New mode should
 494 * be different from old one and cl->pq_key has to be valid if changing
 495 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 496 */
 497static void
 498htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 499{
 500	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 501
 502	if (new_mode == cl->cmode)
 503		return;
 504
 505	if (cl->prio_activity) {	/* not necessary: speed optimization */
 506		if (cl->cmode != HTB_CANT_SEND)
 507			htb_deactivate_prios(q, cl);
 508		cl->cmode = new_mode;
 509		if (new_mode != HTB_CANT_SEND)
 510			htb_activate_prios(q, cl);
 511	} else
 512		cl->cmode = new_mode;
 513}
 514
 515/**
 516 * htb_activate - inserts leaf cl into appropriate active feeds
 517 *
 518 * The routine learns the (new) priority of the leaf and activates the feed
 519 * chain for the prio. It can be safely called on an already active leaf.
 520 * It also adds the leaf into the drop list.
 521 */
 522static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 523{
 524	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 525
 526	if (!cl->prio_activity) {
 527		cl->prio_activity = 1 << cl->prio;
 528		htb_activate_prios(q, cl);
 529		list_add_tail(&cl->un.leaf.drop_list,
 530			      q->drops + cl->prio);
 531	}
 532}
 533
 534/**
 535 * htb_deactivate - remove leaf cl from active feeds
 536 *
 537 * Make sure that the leaf is active. In other words, it can't be called
 538 * with a non-active leaf. It also removes the class from the drop list.
 539 */
 540static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 541{
 542	WARN_ON(!cl->prio_activity);
 543
 544	htb_deactivate_prios(q, cl);
 545	cl->prio_activity = 0;
 546	list_del_init(&cl->un.leaf.drop_list);
 547}
 548
 549static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 550{
 551	int uninitialized_var(ret);
 552	struct htb_sched *q = qdisc_priv(sch);
 553	struct htb_class *cl = htb_classify(skb, sch, &ret);
 554
 555	if (cl == HTB_DIRECT) {
 556		/* enqueue to helper queue */
 557		if (q->direct_queue.qlen < q->direct_qlen) {
 558			__skb_queue_tail(&q->direct_queue, skb);
 559			q->direct_pkts++;
 560		} else {
 561			return qdisc_drop(skb, sch);
 562		}
 563#ifdef CONFIG_NET_CLS_ACT
 564	} else if (!cl) {
 565		if (ret & __NET_XMIT_BYPASS)
 566			sch->qstats.drops++;
 567		kfree_skb(skb);
 568		return ret;
 569#endif
 570	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
 571		if (net_xmit_drop_count(ret)) {
 572			sch->qstats.drops++;
 573			cl->qstats.drops++;
 574		}
 575		return ret;
 576	} else {
 577		htb_activate(q, cl);
 578	}
 579
 580	sch->q.qlen++;
 581	return NET_XMIT_SUCCESS;
 582}
 583
 584static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
 585{
 586	long toks = diff + cl->tokens;
 587
 588	if (toks > cl->buffer)
 589		toks = cl->buffer;
 590	toks -= (long) qdisc_l2t(cl->rate, bytes);
 591	if (toks <= -cl->mbuffer)
 592		toks = 1 - cl->mbuffer;
 593
 594	cl->tokens = toks;
 595}
 596
 597static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
 598{
 599	long toks = diff + cl->ctokens;
 600
 601	if (toks > cl->cbuffer)
 602		toks = cl->cbuffer;
 603	toks -= (long) qdisc_l2t(cl->ceil, bytes);
 604	if (toks <= -cl->mbuffer)
 605		toks = 1 - cl->mbuffer;
 606
 607	cl->ctokens = toks;
 608}
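/* Editor's note (sketch): both helpers above are one leaky-bucket step -
 * refill by the elapsed time "diff", clamp to the bucket depth, then
 * subtract the wire time of "bytes" via the rate table (qdisc_l2t());
 * e.g. a 1500-byte packet against a 1 Mbit/s rate costs roughly 12 ms
 * worth of tokens.
 */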
 609
 610/**
 611 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 612 *
 613 * The routine assumes that a packet "bytes" long was dequeued from leaf cl
 614 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
 615 * the leaf and all ancestors and to the rate bucket for ancestors at levels
 616 * "level" and higher. It also handles a possible change of mode resulting
 617 * from the update. Note that the mode can also increase here (MAY_BORROW to
 618 * CAN_SEND) because we can use a more precise clock than the event queue.
 619 * In such a case we remove the class from the event queue first.
 620 */
 621static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 622			     int level, struct sk_buff *skb)
 623{
 624	int bytes = qdisc_pkt_len(skb);
 625	enum htb_cmode old_mode;
 626	long diff;
 627
 628	while (cl) {
 629		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
 630		if (cl->level >= level) {
 631			if (cl->level == level)
 632				cl->xstats.lends++;
 633			htb_accnt_tokens(cl, bytes, diff);
 634		} else {
 635			cl->xstats.borrows++;
 636			cl->tokens += diff;	/* we moved t_c; update tokens */
 637		}
 638		htb_accnt_ctokens(cl, bytes, diff);
 639		cl->t_c = q->now;
 640
 641		old_mode = cl->cmode;
 642		diff = 0;
 643		htb_change_class_mode(q, cl, &diff);
 644		if (old_mode != cl->cmode) {
 645			if (old_mode != HTB_CAN_SEND)
 646				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
 647			if (cl->cmode != HTB_CAN_SEND)
 648				htb_add_to_wait_tree(q, cl, diff);
 649		}
 650
 651		/* update basic stats except for leaves which are already updated */
 652		if (cl->level)
 653			bstats_update(&cl->bstats, skb);
 654
 655		cl = cl->parent;
 656	}
 657}
 658
 659/**
 660 * htb_do_events - make mode changes to classes at the level
 661 *
 662 * Scans the event queue for pending events and applies them. Returns the time
 663 * of the next pending event (0 for no event in pq, q->now for too many events).
 664 * Note: Only events with cl->pq_key <= q->now are applied.
 665 */
 666static psched_time_t htb_do_events(struct htb_sched *q, int level,
 667				   unsigned long start)
 668{
 669	/* don't run for longer than 2 jiffies; 2 is used instead of
 670	 * 1 to simplify things when jiffy is going to be incremented
 671	 * too soon
 672	 */
 673	unsigned long stop_at = start + 2;
 674	while (time_before(jiffies, stop_at)) {
 675		struct htb_class *cl;
 676		long diff;
 677		struct rb_node *p = rb_first(&q->wait_pq[level]);
 678
 679		if (!p)
 680			return 0;
 681
 682		cl = rb_entry(p, struct htb_class, pq_node);
 683		if (cl->pq_key > q->now)
 684			return cl->pq_key;
 685
 686		htb_safe_rb_erase(p, q->wait_pq + level);
 687		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
 688		htb_change_class_mode(q, cl, &diff);
 689		if (cl->cmode != HTB_CAN_SEND)
 690			htb_add_to_wait_tree(q, cl, diff);
 691	}
 692
 693	/* too much load - let's continue after a break for scheduling */
 694	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
 695		pr_warning("htb: too many events!\n");
 696		q->warned |= HTB_WARN_TOOMANYEVENTS;
 697	}
 698
 699	return q->now;
 700}
 701
 702/* Returns class->node+prio from the id-tree where the class's id is >= id,
 703 * or NULL if no such node exists.
 704 */
 705static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 706					      u32 id)
 707{
 708	struct rb_node *r = NULL;
 709	while (n) {
 710		struct htb_class *cl =
 711		    rb_entry(n, struct htb_class, node[prio]);
 712
 713		if (id > cl->common.classid) {
 714			n = n->rb_right;
 715		} else if (id < cl->common.classid) {
 716			r = n;
 717			n = n->rb_left;
 718		} else {
 719			return n;
 720		}
 721	}
 722	return r;
 723}
 724
 725/**
 726 * htb_lookup_leaf - returns next leaf class in DRR order
 727 *
 728 * Find the leaf the current feed pointer points to.
 729 */
 730static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 731					 struct rb_node **pptr, u32 * pid)
 732{
 733	int i;
 734	struct {
 735		struct rb_node *root;
 736		struct rb_node **pptr;
 737		u32 *pid;
 738	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 739
 740	BUG_ON(!tree->rb_node);
 741	sp->root = tree->rb_node;
 742	sp->pptr = pptr;
 743	sp->pid = pid;
 744
 745	for (i = 0; i < 65535; i++) {
 746		if (!*sp->pptr && *sp->pid) {
 747			/* ptr was invalidated but id is valid - try to recover
 748			 * the original or next ptr
 749			 */
 750			*sp->pptr =
 751			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 752		}
 753		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
 754				 * can become out of date quickly
 755				 */
 756		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 757			*sp->pptr = sp->root;
 758			while ((*sp->pptr)->rb_left)
 759				*sp->pptr = (*sp->pptr)->rb_left;
 760			if (sp > stk) {
 761				sp--;
 762				if (!*sp->pptr) {
 763					WARN_ON(1);
 764					return NULL;
 765				}
 766				htb_next_rb_node(sp->pptr);
 767			}
 768		} else {
 769			struct htb_class *cl;
 770			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 771			if (!cl->level)
 772				return cl;
 773			(++sp)->root = cl->un.inner.feed[prio].rb_node;
 774			sp->pptr = cl->un.inner.ptr + prio;
 775			sp->pid = cl->un.inner.last_ptr_id + prio;
 776		}
 777	}
 778	WARN_ON(1);
 779	return NULL;
 780}
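/* Editor's note (sketch): htb_lookup_leaf() above is an iterative DFS
 * with an explicit stack (stk[]), descending the per-prio feed trees
 * from the row root until it reaches a level-0 class; the 65535
 * iteration cap is only a defensive bound against a corrupted tree.
 */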
 781
 782/* dequeues packet at given priority and level; call only if
 783 * you are sure that there is an active class at prio/level
 784 */
 785static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 786					int level)
 787{
 788	struct sk_buff *skb = NULL;
 789	struct htb_class *cl, *start;
 790	/* look initial class up in the row */
 791	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
 792				     q->ptr[level] + prio,
 793				     q->last_ptr_id[level] + prio);
 794
 795	do {
 796next:
 797		if (unlikely(!cl))
 798			return NULL;
 799
 800		/* class can be empty - it is unlikely but can be true if the leaf
 801		 * qdisc drops packets in its enqueue routine or if someone used
 802		 * a graft operation on the leaf since the last dequeue;
 803		 * simply deactivate and skip such a class
 804		 */
 805		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
 806			struct htb_class *next;
 807			htb_deactivate(q, cl);
 808
 809			/* row/level might become empty */
 810			if ((q->row_mask[level] & (1 << prio)) == 0)
 811				return NULL;
 812
 813			next = htb_lookup_leaf(q->row[level] + prio,
 814					       prio, q->ptr[level] + prio,
 815					       q->last_ptr_id[level] + prio);
 816
 817			if (cl == start)	/* fix start if we just deleted it */
 818				start = next;
 819			cl = next;
 820			goto next;
 821		}
 822
 823		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
 824		if (likely(skb != NULL))
 825			break;
 826
 827		qdisc_warn_nonwc("htb", cl->un.leaf.q);
 828		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
 829				  ptr[0]) + prio);
 830		cl = htb_lookup_leaf(q->row[level] + prio, prio,
 831				     q->ptr[level] + prio,
 832				     q->last_ptr_id[level] + prio);
 833
 834	} while (cl != start);
 835
 836	if (likely(skb != NULL)) {
 837		bstats_update(&cl->bstats, skb);
 838		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
 839		if (cl->un.leaf.deficit[level] < 0) {
 840			cl->un.leaf.deficit[level] += cl->quantum;
 841			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
 842					  ptr[0]) + prio);
 843		}
 844		/* this used to be after charge_class but this constellation
 845		 * gives us slightly better performance
 846		 */
 847		if (!cl->un.leaf.q->q.qlen)
 848			htb_deactivate(q, cl);
 849		htb_charge_class(q, cl, level, skb);
 850	}
 851	return skb;
 852}
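/* Editor's note on the DRR step above (sketch): every dequeued packet is
 * charged against the leaf's per-level deficit; once the deficit goes
 * negative the leaf receives another cl->quantum and the round-robin
 * pointer advances, so quantum bounds how many bytes one leaf may send
 * per visit before its siblings get a turn.
 */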
 853
 854static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 855{
 856	struct sk_buff *skb;
 857	struct htb_sched *q = qdisc_priv(sch);
 858	int level;
 859	psched_time_t next_event;
 860	unsigned long start_at;
 861
 862	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 863	skb = __skb_dequeue(&q->direct_queue);
 864	if (skb != NULL) {
 865ok:
 866		qdisc_bstats_update(sch, skb);
 867		qdisc_unthrottled(sch);
 868		sch->q.qlen--;
 869		return skb;
 870	}
 871
 872	if (!sch->q.qlen)
 873		goto fin;
 874	q->now = psched_get_time();
 875	start_at = jiffies;
 876
 877	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 878
 879	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 880		/* common case optimization - skip event handler quickly */
 881		int m;
 882		psched_time_t event;
 883
 884		if (q->now >= q->near_ev_cache[level]) {
 885			event = htb_do_events(q, level, start_at);
 886			if (!event)
 887				event = q->now + PSCHED_TICKS_PER_SEC;
 888			q->near_ev_cache[level] = event;
 889		} else
 890			event = q->near_ev_cache[level];
 891
 892		if (next_event > event)
 893			next_event = event;
 894
 895		m = ~q->row_mask[level];
 896		while (m != (int)(-1)) {
 897			int prio = ffz(m);
 898
 899			m |= 1 << prio;
 900			skb = htb_dequeue_tree(q, prio, level);
 901			if (likely(skb != NULL))
 902				goto ok;
 903		}
 904	}
 905	sch->qstats.overlimits++;
 906	if (likely(next_event > q->now))
 907		qdisc_watchdog_schedule(&q->watchdog, next_event);
 908	else
 909		schedule_work(&q->work);
 910fin:
 911	return skb;
 912}
 913
 914/* try to drop from each class (by prio) until one succeeds */
 915static unsigned int htb_drop(struct Qdisc *sch)
 916{
 917	struct htb_sched *q = qdisc_priv(sch);
 918	int prio;
 919
 920	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
 921		struct list_head *p;
 922		list_for_each(p, q->drops + prio) {
 923			struct htb_class *cl = list_entry(p, struct htb_class,
 924							  un.leaf.drop_list);
 925			unsigned int len;
 926			if (cl->un.leaf.q->ops->drop &&
 927			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
 928				sch->q.qlen--;
 929				if (!cl->un.leaf.q->q.qlen)
 930					htb_deactivate(q, cl);
 931				return len;
 932			}
 933		}
 934	}
 935	return 0;
 936}
 937
 938/* reset all classes */
 939/* always called under BH & queue lock */
 940static void htb_reset(struct Qdisc *sch)
 941{
 942	struct htb_sched *q = qdisc_priv(sch);
 943	struct htb_class *cl;
 944	struct hlist_node *n;
 945	unsigned int i;
 946
 947	for (i = 0; i < q->clhash.hashsize; i++) {
 948		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
 949			if (cl->level)
 950				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
 951			else {
 952				if (cl->un.leaf.q)
 953					qdisc_reset(cl->un.leaf.q);
 954				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
 955			}
 956			cl->prio_activity = 0;
 957			cl->cmode = HTB_CAN_SEND;
 958
 959		}
 960	}
 961	qdisc_watchdog_cancel(&q->watchdog);
 962	__skb_queue_purge(&q->direct_queue);
 963	sch->q.qlen = 0;
 964	memset(q->row, 0, sizeof(q->row));
 965	memset(q->row_mask, 0, sizeof(q->row_mask));
 966	memset(q->wait_pq, 0, sizeof(q->wait_pq));
 967	memset(q->ptr, 0, sizeof(q->ptr));
 968	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 969		INIT_LIST_HEAD(q->drops + i);
 970}
 971
 972static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
 973	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
 974	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
 975	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 976	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 977};
 978
 979static void htb_work_func(struct work_struct *work)
 980{
 981	struct htb_sched *q = container_of(work, struct htb_sched, work);
 982	struct Qdisc *sch = q->watchdog.qdisc;
 983
 984	__netif_schedule(qdisc_root(sch));
 985}
 986
 987static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 988{
 989	struct htb_sched *q = qdisc_priv(sch);
 990	struct nlattr *tb[TCA_HTB_INIT + 1];
 991	struct tc_htb_glob *gopt;
 992	int err;
 993	int i;
 994
 995	if (!opt)
 996		return -EINVAL;
 997
 998	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
 999	if (err < 0)
1000		return err;
1001
1002	if (tb[TCA_HTB_INIT] == NULL) {
1003		pr_err("HTB: hey probably you have bad tc tool ?\n");
1004		return -EINVAL;
1005	}
1006	gopt = nla_data(tb[TCA_HTB_INIT]);
1007	if (gopt->version != HTB_VER >> 16) {
1008		pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
1009		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
1010		return -EINVAL;
1011	}
1012
1013	err = qdisc_class_hash_init(&q->clhash);
1014	if (err < 0)
1015		return err;
1016	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1017		INIT_LIST_HEAD(q->drops + i);
1018
1019	qdisc_watchdog_init(&q->watchdog, sch);
1020	INIT_WORK(&q->work, htb_work_func);
1021	skb_queue_head_init(&q->direct_queue);
1022
1023	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1024	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
1025		q->direct_qlen = 2;
1026
1027	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1028		q->rate2quantum = 1;
1029	q->defcls = gopt->defcls;
1030
1031	return 0;
1032}
1033
1034static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1035{
1036	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1037	struct htb_sched *q = qdisc_priv(sch);
1038	struct nlattr *nest;
1039	struct tc_htb_glob gopt;
1040
1041	spin_lock_bh(root_lock);
1042
1043	gopt.direct_pkts = q->direct_pkts;
1044	gopt.version = HTB_VER;
1045	gopt.rate2quantum = q->rate2quantum;
1046	gopt.defcls = q->defcls;
1047	gopt.debug = 0;
1048
1049	nest = nla_nest_start(skb, TCA_OPTIONS);
1050	if (nest == NULL)
1051		goto nla_put_failure;
1052	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
1053		goto nla_put_failure;
1054	nla_nest_end(skb, nest);
1055
1056	spin_unlock_bh(root_lock);
1057	return skb->len;
1058
1059nla_put_failure:
1060	spin_unlock_bh(root_lock);
1061	nla_nest_cancel(skb, nest);
1062	return -1;
1063}
1064
1065static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1066			  struct sk_buff *skb, struct tcmsg *tcm)
1067{
1068	struct htb_class *cl = (struct htb_class *)arg;
1069	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1070	struct nlattr *nest;
1071	struct tc_htb_opt opt;
1072
1073	spin_lock_bh(root_lock);
1074	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1075	tcm->tcm_handle = cl->common.classid;
1076	if (!cl->level && cl->un.leaf.q)
1077		tcm->tcm_info = cl->un.leaf.q->handle;
1078
1079	nest = nla_nest_start(skb, TCA_OPTIONS);
1080	if (nest == NULL)
1081		goto nla_put_failure;
1082
1083	memset(&opt, 0, sizeof(opt));
1084
1085	opt.rate = cl->rate->rate;
1086	opt.buffer = cl->buffer;
1087	opt.ceil = cl->ceil->rate;
1088	opt.cbuffer = cl->cbuffer;
1089	opt.quantum = cl->quantum;
1090	opt.prio = cl->prio;
1091	opt.level = cl->level;
1092	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1093		goto nla_put_failure;
1094
1095	nla_nest_end(skb, nest);
1096	spin_unlock_bh(root_lock);
1097	return skb->len;
1098
1099nla_put_failure:
1100	spin_unlock_bh(root_lock);
1101	nla_nest_cancel(skb, nest);
1102	return -1;
1103}
1104
1105static int
1106htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1107{
1108	struct htb_class *cl = (struct htb_class *)arg;
1109
1110	if (!cl->level && cl->un.leaf.q)
1111		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1112	cl->xstats.tokens = cl->tokens;
1113	cl->xstats.ctokens = cl->ctokens;
1114
1115	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1116	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
1117	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
1118		return -1;
1119
1120	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1121}
1122
1123static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1124		     struct Qdisc **old)
1125{
1126	struct htb_class *cl = (struct htb_class *)arg;
1127
1128	if (cl->level)
1129		return -EINVAL;
1130	if (new == NULL &&
1131	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1132				     cl->common.classid)) == NULL)
1133		return -ENOBUFS;
1134
1135	sch_tree_lock(sch);
1136	*old = cl->un.leaf.q;
1137	cl->un.leaf.q = new;
1138	if (*old != NULL) {
1139		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1140		qdisc_reset(*old);
1141	}
1142	sch_tree_unlock(sch);
1143	return 0;
1144}
1145
1146static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1147{
1148	struct htb_class *cl = (struct htb_class *)arg;
1149	return !cl->level ? cl->un.leaf.q : NULL;
1150}
1151
1152static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1153{
1154	struct htb_class *cl = (struct htb_class *)arg;
1155
1156	if (cl->un.leaf.q->q.qlen == 0)
1157		htb_deactivate(qdisc_priv(sch), cl);
1158}
1159
1160static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1161{
1162	struct htb_class *cl = htb_find(classid, sch);
1163	if (cl)
1164		cl->refcnt++;
1165	return (unsigned long)cl;
1166}
1167
1168static inline int htb_parent_last_child(struct htb_class *cl)
1169{
1170	if (!cl->parent)
1171		/* the root class */
1172		return 0;
1173	if (cl->parent->children > 1)
1174		/* not the last child */
1175		return 0;
1176	return 1;
1177}
1178
1179static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1180			       struct Qdisc *new_q)
1181{
1182	struct htb_class *parent = cl->parent;
1183
1184	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1185
1186	if (parent->cmode != HTB_CAN_SEND)
1187		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
1188
1189	parent->level = 0;
1190	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1191	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1192	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1193	parent->tokens = parent->buffer;
1194	parent->ctokens = parent->cbuffer;
1195	parent->t_c = psched_get_time();
1196	parent->cmode = HTB_CAN_SEND;
1197}
1198
1199static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1200{
1201	if (!cl->level) {
1202		WARN_ON(!cl->un.leaf.q);
1203		qdisc_destroy(cl->un.leaf.q);
1204	}
1205	gen_kill_estimator(&cl->bstats, &cl->rate_est);
1206	qdisc_put_rtab(cl->rate);
1207	qdisc_put_rtab(cl->ceil);
1208
1209	tcf_destroy_chain(&cl->filter_list);
1210	kfree(cl);
1211}
1212
1213static void htb_destroy(struct Qdisc *sch)
1214{
1215	struct htb_sched *q = qdisc_priv(sch);
1216	struct hlist_node *n, *next;
1217	struct htb_class *cl;
1218	unsigned int i;
1219
1220	cancel_work_sync(&q->work);
1221	qdisc_watchdog_cancel(&q->watchdog);
1222	/* This line used to be after htb_destroy_class call below
1223	 * and surprisingly it worked in 2.4. But it must precede it
1224	 * because filters need their target class alive to be able to call
1225	 * unbind_filter on it (without Oops).
1226	 */
1227	tcf_destroy_chain(&q->filter_list);
1228
1229	for (i = 0; i < q->clhash.hashsize; i++) {
1230		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1231			tcf_destroy_chain(&cl->filter_list);
1232	}
1233	for (i = 0; i < q->clhash.hashsize; i++) {
1234		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1235					  common.hnode)
1236			htb_destroy_class(sch, cl);
1237	}
1238	qdisc_class_hash_destroy(&q->clhash);
1239	__skb_queue_purge(&q->direct_queue);
1240}
1241
1242static int htb_delete(struct Qdisc *sch, unsigned long arg)
1243{
1244	struct htb_sched *q = qdisc_priv(sch);
1245	struct htb_class *cl = (struct htb_class *)arg;
1246	unsigned int qlen;
1247	struct Qdisc *new_q = NULL;
1248	int last_child = 0;
1249
1250	// TODO: why don't we allow deleting a subtree? references? does the
1251	// tc subsys guarantee us that in htb_destroy it holds no class
1252	// refs so that we can remove children safely there?
1253	if (cl->children || cl->filter_cnt)
1254		return -EBUSY;
1255
1256	if (!cl->level && htb_parent_last_child(cl)) {
1257		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1258					  cl->parent->common.classid);
1259		last_child = 1;
1260	}
1261
1262	sch_tree_lock(sch);
1263
1264	if (!cl->level) {
1265		qlen = cl->un.leaf.q->q.qlen;
1266		qdisc_reset(cl->un.leaf.q);
1267		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
1268	}
1269
1270	/* delete from hash and active; remainder in destroy_class */
1271	qdisc_class_hash_remove(&q->clhash, &cl->common);
1272	if (cl->parent)
1273		cl->parent->children--;
1274
1275	if (cl->prio_activity)
1276		htb_deactivate(q, cl);
1277
1278	if (cl->cmode != HTB_CAN_SEND)
1279		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1280
1281	if (last_child)
1282		htb_parent_to_leaf(q, cl, new_q);
1283
1284	BUG_ON(--cl->refcnt == 0);
1285	/*
1286	 * This shouldn't happen: we "hold" one cops->get() when called
1287	 * from tc_ctl_tclass; the destroy method is done from cops->put().
1288	 */
1289
1290	sch_tree_unlock(sch);
1291	return 0;
1292}
1293
1294static void htb_put(struct Qdisc *sch, unsigned long arg)
1295{
1296	struct htb_class *cl = (struct htb_class *)arg;
1297
1298	if (--cl->refcnt == 0)
1299		htb_destroy_class(sch, cl);
1300}
1301
1302static int htb_change_class(struct Qdisc *sch, u32 classid,
1303			    u32 parentid, struct nlattr **tca,
1304			    unsigned long *arg)
1305{
1306	int err = -EINVAL;
1307	struct htb_sched *q = qdisc_priv(sch);
1308	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1309	struct nlattr *opt = tca[TCA_OPTIONS];
1310	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1311	struct nlattr *tb[__TCA_HTB_MAX];
1312	struct tc_htb_opt *hopt;
1313
1314	/* extract all subattrs from opt attr */
1315	if (!opt)
1316		goto failure;
1317
1318	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
1319	if (err < 0)
1320		goto failure;
1321
1322	err = -EINVAL;
1323	if (tb[TCA_HTB_PARMS] == NULL)
1324		goto failure;
1325
1326	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1327
1328	hopt = nla_data(tb[TCA_HTB_PARMS]);
1329
1330	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1331	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1332	if (!rtab || !ctab)
1333		goto failure;
1334
1335	if (!cl) {		/* new class */
1336		struct Qdisc *new_q;
1337		int prio;
1338		struct {
1339			struct nlattr		nla;
1340			struct gnet_estimator	opt;
1341		} est = {
1342			.nla = {
1343				.nla_len	= nla_attr_size(sizeof(est.opt)),
1344				.nla_type	= TCA_RATE,
1345			},
1346			.opt = {
1347				/* 4s interval, 16s averaging constant */
1348				.interval	= 2,
1349				.ewma_log	= 2,
1350			},
1351		};
1352
1353		/* check for valid classid */
1354		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1355		    htb_find(classid, sch))
1356			goto failure;
1357
1358		/* check maximal depth */
1359		if (parent && parent->parent && parent->parent->level < 2) {
1360			pr_err("htb: tree is too deep\n");
1361			goto failure;
1362		}
1363		err = -ENOBUFS;
1364		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1365		if (!cl)
1366			goto failure;
1367
1368		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
1369					qdisc_root_sleeping_lock(sch),
1370					tca[TCA_RATE] ? : &est.nla);
1371		if (err) {
1372			kfree(cl);
1373			goto failure;
1374		}
1375
1376		cl->refcnt = 1;
1377		cl->children = 0;
1378		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1379		RB_CLEAR_NODE(&cl->pq_node);
1380
1381		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1382			RB_CLEAR_NODE(&cl->node[prio]);
1383
1384		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1385		 * so it can't be used inside sch_tree_lock
1386		 * -- thanks to Karlis Peisenieks
1387		 */
1388		new_q = qdisc_create_dflt(sch->dev_queue,
1389					  &pfifo_qdisc_ops, classid);
1390		sch_tree_lock(sch);
1391		if (parent && !parent->level) {
1392			unsigned int qlen = parent->un.leaf.q->q.qlen;
1393
1394			/* turn parent into inner node */
1395			qdisc_reset(parent->un.leaf.q);
1396			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
1397			qdisc_destroy(parent->un.leaf.q);
1398			if (parent->prio_activity)
1399				htb_deactivate(q, parent);
1400
1401			/* remove from evt list because of level change */
1402			if (parent->cmode != HTB_CAN_SEND) {
1403				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
1404				parent->cmode = HTB_CAN_SEND;
1405			}
1406			parent->level = (parent->parent ? parent->parent->level
1407					 : TC_HTB_MAXDEPTH) - 1;
1408			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1409		}
1410		/* leaf (we) needs elementary qdisc */
1411		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1412
1413		cl->common.classid = classid;
1414		cl->parent = parent;
1415
1416		/* set class to be in HTB_CAN_SEND state */
1417		cl->tokens = hopt->buffer;
1418		cl->ctokens = hopt->cbuffer;
1419		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
1420		cl->t_c = psched_get_time();
1421		cl->cmode = HTB_CAN_SEND;
1422
1423		/* attach to the hash list and parent's family */
1424		qdisc_class_hash_insert(&q->clhash, &cl->common);
1425		if (parent)
1426			parent->children++;
1427	} else {
1428		if (tca[TCA_RATE]) {
1429			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
1430						    qdisc_root_sleeping_lock(sch),
1431						    tca[TCA_RATE]);
1432			if (err)
1433				return err;
1434		}
1435		sch_tree_lock(sch);
1436	}
1437
1438	/* there used to be a nasty bug here; we have to check that the node
1439	 * is really a leaf before changing cl->un.leaf !
1440	 */
1441	if (!cl->level) {
1442		cl->quantum = rtab->rate.rate / q->rate2quantum;
1443		if (!hopt->quantum && cl->quantum < 1000) {
1444			pr_warning(
1445			       "HTB: quantum of class %X is small. Consider r2q change.\n",
1446			       cl->common.classid);
1447			cl->quantum = 1000;
1448		}
1449		if (!hopt->quantum && cl->quantum > 200000) {
1450			pr_warning(
1451			       "HTB: quantum of class %X is big. Consider r2q change.\n",
1452			       cl->common.classid);
1453			cl->quantum = 200000;
1454		}
1455		if (hopt->quantum)
1456			cl->quantum = hopt->quantum;
1457		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1458			cl->prio = TC_HTB_NUMPRIO - 1;
1459	}
1460
1461	cl->buffer = hopt->buffer;
1462	cl->cbuffer = hopt->cbuffer;
1463	if (cl->rate)
1464		qdisc_put_rtab(cl->rate);
1465	cl->rate = rtab;
1466	if (cl->ceil)
1467		qdisc_put_rtab(cl->ceil);
1468	cl->ceil = ctab;
1469	sch_tree_unlock(sch);
1470
1471	qdisc_class_hash_grow(sch, &q->clhash);
1472
1473	*arg = (unsigned long)cl;
1474	return 0;
1475
1476failure:
1477	if (rtab)
1478		qdisc_put_rtab(rtab);
1479	if (ctab)
1480		qdisc_put_rtab(ctab);
1481	return err;
1482}
1483
1484static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1485{
1486	struct htb_sched *q = qdisc_priv(sch);
1487	struct htb_class *cl = (struct htb_class *)arg;
1488	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1489
1490	return fl;
1491}
1492
1493static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1494				     u32 classid)
1495{
1496	struct htb_class *cl = htb_find(classid, sch);
1497
1498	/*if (cl && !cl->level) return 0;
1499	 * The line above used to be there to prevent attaching filters to
1500	 * leaves. But at least the tc_index filter uses this just to get a class
1501	 * for other reasons, so we have to allow it.
1502	 * ----
1503	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1504	 * another way to "lock" the class - unlike "get" this lock can
1505	 * be broken by class during destroy IIUC.
1506	 */
1507	if (cl)
1508		cl->filter_cnt++;
1509	return (unsigned long)cl;
1510}
1511
1512static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1513{
1514	struct htb_class *cl = (struct htb_class *)arg;
1515
1516	if (cl)
1517		cl->filter_cnt--;
1518}
1519
1520static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1521{
1522	struct htb_sched *q = qdisc_priv(sch);
1523	struct htb_class *cl;
1524	struct hlist_node *n;
1525	unsigned int i;
1526
1527	if (arg->stop)
1528		return;
1529
1530	for (i = 0; i < q->clhash.hashsize; i++) {
1531		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
1532			if (arg->count < arg->skip) {
1533				arg->count++;
1534				continue;
1535			}
1536			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1537				arg->stop = 1;
1538				return;
1539			}
1540			arg->count++;
1541		}
1542	}
1543}
1544
1545static const struct Qdisc_class_ops htb_class_ops = {
1546	.graft		=	htb_graft,
1547	.leaf		=	htb_leaf,
1548	.qlen_notify	=	htb_qlen_notify,
1549	.get		=	htb_get,
1550	.put		=	htb_put,
1551	.change		=	htb_change_class,
1552	.delete		=	htb_delete,
1553	.walk		=	htb_walk,
1554	.tcf_chain	=	htb_find_tcf,
1555	.bind_tcf	=	htb_bind_filter,
1556	.unbind_tcf	=	htb_unbind_filter,
1557	.dump		=	htb_dump_class,
1558	.dump_stats	=	htb_dump_class_stats,
1559};
1560
1561static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1562	.cl_ops		=	&htb_class_ops,
1563	.id		=	"htb",
1564	.priv_size	=	sizeof(struct htb_sched),
1565	.enqueue	=	htb_enqueue,
1566	.dequeue	=	htb_dequeue,
1567	.peek		=	qdisc_peek_dequeued,
1568	.drop		=	htb_drop,
1569	.init		=	htb_init,
1570	.reset		=	htb_reset,
1571	.destroy	=	htb_destroy,
1572	.dump		=	htb_dump,
1573	.owner		=	THIS_MODULE,
1574};
1575
1576static int __init htb_module_init(void)
1577{
1578	return register_qdisc(&htb_qdisc_ops);
1579}
1580static void __exit htb_module_exit(void)
1581{
1582	unregister_qdisc(&htb_qdisc_ops);
1583}
1584
1585module_init(htb_module_init)
1586module_exit(htb_module_exit)
1587MODULE_LICENSE("GPL");
v4.17
   1/*
   2 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Martin Devera, <devik@cdi.cz>
  10 *
  11 * Credits (in time order) for older HTB versions:
  12 *              Stef Coene <stef.coene@docum.org>
  13 *			HTB support at LARTC mailing list
  14 *		Ondrej Kraus, <krauso@barr.cz>
  15 *			found missing INIT_QDISC(htb)
  16 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
  17 *			helped a lot to locate nasty class stall bug
  18 *		Andi Kleen, Jamal Hadi, Bert Hubert
  19 *			code review and helpful comments on shaping
  20 *		Tomasz Wrona, <tw@eter.tym.pl>
  21 *			created test case so that I was able to fix nasty bug
  22 *		Wilfried Weissmann
  23 *			spotted bug in dequeue code and helped with fix
  24 *		Jiri Fojtasek
  25 *			fixed requeue routine
  26 *		and many others. thanks.
  27 */
  28#include <linux/module.h>
  29#include <linux/moduleparam.h>
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/string.h>
  33#include <linux/errno.h>
  34#include <linux/skbuff.h>
  35#include <linux/list.h>
  36#include <linux/compiler.h>
  37#include <linux/rbtree.h>
  38#include <linux/workqueue.h>
  39#include <linux/slab.h>
  40#include <net/netlink.h>
  41#include <net/sch_generic.h>
  42#include <net/pkt_sched.h>
  43#include <net/pkt_cls.h>
  44
  45/* HTB algorithm.
  46    Author: devik@cdi.cz
  47    ========================================================================
  48    HTB is like TBF with multiple classes. It is also similar to CBQ because
  49    it allows assigning a priority to each class in the hierarchy.
  50    In fact it is another implementation of Floyd's formal sharing.
  51
  52    Levels:
  53    Each class is assigned a level. A leaf ALWAYS has level 0 and root
  54    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
  55    one less than their parent.
  56*/
  57
  58static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
  59#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
  60
  61#if HTB_VER >> 16 != TC_HTB_PROTOVER
  62#error "Mismatched sch_htb.c and pkt_sch.h"
  63#endif
  64
  65/* Module parameter and sysfs export */
  66module_param    (htb_hysteresis, int, 0640);
  67MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
  68
  69static int htb_rate_est = 0; /* htb classes have a default rate estimator */
  70module_param(htb_rate_est, int, 0640);
  71MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
  72
  73/* used internally to keep the status of a single class */
  74enum htb_cmode {
  75	HTB_CANT_SEND,		/* class can't send and can't borrow */
  76	HTB_MAY_BORROW,		/* class can't send but may borrow */
  77	HTB_CAN_SEND		/* class can send */
  78};
  79
  80struct htb_prio {
  81	union {
  82		struct rb_root	row;
  83		struct rb_root	feed;
  84	};
  85	struct rb_node	*ptr;
  86	/* When class changes from state 1->2 and disconnects from
  87	 * parent's feed then we lost ptr value and start from the
  88	 * first child again. Here we store classid of the
  89	 * last valid ptr (used when ptr is NULL).
  90	 */
  91	u32		last_ptr_id;
  92};
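/* Editor's note (sketch): htb_prio consolidates the parallel per-prio
 * rb_root/ptr/last_ptr_id arrays of the older layout; the row/feed
 * union exists because the same structure backs both the qdisc-level
 * self rows (htb_level.hprio) and the per-class feed trees
 * (htb_class_inner.clprio).
 */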
  93
  94/* interior & leaf nodes; props specific to leaves are marked L:
  95 * To reduce false sharing, place mostly read fields at beginning,
  96 * and mostly written ones at the end.
  97 */
  98struct htb_class {
  99	struct Qdisc_class_common common;
 100	struct psched_ratecfg	rate;
 101	struct psched_ratecfg	ceil;
 102	s64			buffer, cbuffer;/* token bucket depth/rate */
 103	s64			mbuffer;	/* max wait time */
 104	u32			prio;		/* these two are used only by leaves... */
 105	int			quantum;	/* but stored for parent-to-leaf return */
 106
 107	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
 108	struct tcf_block	*block;
 109	int			filter_cnt;
 110
 111	int			level;		/* our level (see above) */
 112	unsigned int		children;
 113	struct htb_class	*parent;	/* parent class */
 114
 115	struct net_rate_estimator __rcu *rate_est;
 116
 117	/*
 118	 * Written often fields
 119	 */
 120	struct gnet_stats_basic_packed bstats;
 121	struct tc_htb_xstats	xstats;	/* our special stats */
 122
 123	/* token bucket parameters */
 124	s64			tokens, ctokens;/* current number of tokens */
 125	s64			t_c;		/* checkpoint time */
 126
 127	union {
 128		struct htb_class_leaf {
 129			struct list_head drop_list;
 130			int		deficit[TC_HTB_MAXDEPTH];
 131			struct Qdisc	*q;
 132		} leaf;
 133		struct htb_class_inner {
 134			struct htb_prio clprio[TC_HTB_NUMPRIO];
 135		} inner;
 136	} un;
 137	s64			pq_key;
 138
 139	int			prio_activity;	/* for which prios are we active */
 140	enum htb_cmode		cmode;		/* current mode of the class */
 141	struct rb_node		pq_node;	/* node for event queue */
 142	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 143
 144	unsigned int drops ____cacheline_aligned_in_smp;
 145	unsigned int		overlimits;
 146};
 147
 148struct htb_level {
 149	struct rb_root	wait_pq;
 150	struct htb_prio hprio[TC_HTB_NUMPRIO];
 151};
 152
 153struct htb_sched {
 154	struct Qdisc_class_hash clhash;
 155	int			defcls;		/* class where unclassified flows go to */
 156	int			rate2quantum;	/* quant = rate / rate2quantum */
 157
 158	/* filters for qdisc itself */
 159	struct tcf_proto __rcu	*filter_list;
 160	struct tcf_block	*block;
 161
 162#define HTB_WARN_TOOMANYEVENTS	0x1
 163	unsigned int		warned;	/* only one warning */
 164	int			direct_qlen;
 165	struct work_struct	work;
 166
 167	/* non shaped skbs; let them go directly thru */
 168	struct qdisc_skb_head	direct_queue;
 169	long			direct_pkts;
 170
 171	struct qdisc_watchdog	watchdog;
 172
 173	s64			now;	/* cached dequeue time */
 174	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 175
 176	/* time of nearest event per level (row) */
 177	s64			near_ev_cache[TC_HTB_MAXDEPTH];
 178
 179	int			row_mask[TC_HTB_MAXDEPTH];
 180
 181	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
 182};
 183
 184/* find class in global hash table using given handle */
 185static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 186{
 187	struct htb_sched *q = qdisc_priv(sch);
 188	struct Qdisc_class_common *clc;
 189
 190	clc = qdisc_class_find(&q->clhash, handle);
 191	if (clc == NULL)
 192		return NULL;
 193	return container_of(clc, struct htb_class, common);
 194}
 195
 196static unsigned long htb_search(struct Qdisc *sch, u32 handle)
 197{
 198	return (unsigned long)htb_find(handle, sch);
 199}
 200/**
 201 * htb_classify - classify a packet into class
 202 *
 203 * It returns NULL if the packet should be dropped or -1 if the packet
 204 * should be passed directly thru. In all other cases a leaf class is returned.
 205 * We allow direct class selection by classid in priority. Then we examine
 206 * filters in qdisc and in inner nodes (if higher filter points to the inner
 207 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 208 * internal fifo (direct). These packets then go directly thru. If we still
 209 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful,
 210 * we finish and return the direct queue.
 211 */
 212#define HTB_DIRECT ((struct htb_class *)-1L)
 213
 214static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 215				      int *qerr)
 216{
 217	struct htb_sched *q = qdisc_priv(sch);
 218	struct htb_class *cl;
 219	struct tcf_result res;
 220	struct tcf_proto *tcf;
 221	int result;
 222
 223	/* allow to select class by setting skb->priority to valid classid;
 224	 * note that nfmark can be used too by attaching filter fw with no
 225	 * rules in it
 226	 */
 227	if (skb->priority == sch->handle)
 228		return HTB_DIRECT;	/* X:0 (direct flow) selected */
 229	cl = htb_find(skb->priority, sch);
 230	if (cl) {
 231		if (cl->level == 0)
 232			return cl;
 233		/* Start with inner filter chain if a non-leaf class is selected */
 234		tcf = rcu_dereference_bh(cl->filter_list);
 235	} else {
 236		tcf = rcu_dereference_bh(q->filter_list);
 237	}
 238
 239	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
  240	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
  241#ifdef CONFIG_NET_CLS_ACT
 242		switch (result) {
 243		case TC_ACT_QUEUED:
 244		case TC_ACT_STOLEN:
 245		case TC_ACT_TRAP:
 246			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 247			/* fall through */
 248		case TC_ACT_SHOT:
 249			return NULL;
 250		}
 251#endif
 252		cl = (void *)res.class;
 253		if (!cl) {
 254			if (res.classid == sch->handle)
 255				return HTB_DIRECT;	/* X:0 (direct flow) */
 256			cl = htb_find(res.classid, sch);
 257			if (!cl)
 258				break;	/* filter selected invalid classid */
 259		}
 260		if (!cl->level)
 261			return cl;	/* we hit leaf; return it */
 262
 263		/* we have got inner class; apply inner filter chain */
 264		tcf = rcu_dereference_bh(cl->filter_list);
 265	}
 266	/* classification failed; try to use default class */
 267	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
 268	if (!cl || cl->level)
 269		return HTB_DIRECT;	/* bad default .. this is safe bet */
 270	return cl;
 271}
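/* Illustrative sketch, not part of the original code: the skb->priority
 * shortcut documented above lets a sender pre-select an HTB class with
 * no filter lookup at all.  The handle values here are hypothetical
 * (qdisc 1:, class 1:10).
 */
static void __maybe_unused htb_example_preselect(struct sk_buff *skb)
{
	/* X:0 would select the direct queue; X:Y selects class Y under X */
	skb->priority = TC_H_MAKE(1 << 16, 0x10);	/* classid 1:10 */
}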
 272
 273/**
 274 * htb_add_to_id_tree - adds class to the round robin list
 275 *
  276 * The routine adds the class to the list (actually a tree) sorted by classid.
  277 * Make sure that the class is not already on such a list for the given prio.
 278 */
 279static void htb_add_to_id_tree(struct rb_root *root,
 280			       struct htb_class *cl, int prio)
 281{
 282	struct rb_node **p = &root->rb_node, *parent = NULL;
 283
 284	while (*p) {
 285		struct htb_class *c;
 286		parent = *p;
 287		c = rb_entry(parent, struct htb_class, node[prio]);
 288
 289		if (cl->common.classid > c->common.classid)
 290			p = &parent->rb_right;
 291		else
 292			p = &parent->rb_left;
 293	}
 294	rb_link_node(&cl->node[prio], parent, p);
 295	rb_insert_color(&cl->node[prio], root);
 296}
 297
  298/**
  299 * htb_add_to_wait_tree - adds class to the event queue with delay
  300 *
  301 * The class is added to the priority event queue to indicate that the
  302 * class will change its mode at time cl->pq_key (nanoseconds). Make sure
  303 * that the class is not already in the queue.
  304 */
 305static void htb_add_to_wait_tree(struct htb_sched *q,
 306				 struct htb_class *cl, s64 delay)
 307{
 308	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
 309
 310	cl->pq_key = q->now + delay;
 311	if (cl->pq_key == q->now)
 312		cl->pq_key++;
 313
 314	/* update the nearest event cache */
 315	if (q->near_ev_cache[cl->level] > cl->pq_key)
 316		q->near_ev_cache[cl->level] = cl->pq_key;
 317
 318	while (*p) {
 319		struct htb_class *c;
 320		parent = *p;
 321		c = rb_entry(parent, struct htb_class, pq_node);
 322		if (cl->pq_key >= c->pq_key)
 323			p = &parent->rb_right;
 324		else
 325			p = &parent->rb_left;
 326	}
 327	rb_link_node(&cl->pq_node, parent, p);
 328	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 329}
 330
  331/**
  332 * htb_next_rb_node - finds next node in binary tree
  333 *
  334 * When we are past the last key we return NULL.
  335 * Average complexity is 2 steps per call.
  336 */
 337static inline void htb_next_rb_node(struct rb_node **n)
 338{
 339	*n = rb_next(*n);
 340}
 341
 342/**
 343 * htb_add_class_to_row - add class to its row
 344 *
 345 * The class is added to row at priorities marked in mask.
 346 * It does nothing if mask == 0.
 347 */
 348static inline void htb_add_class_to_row(struct htb_sched *q,
 349					struct htb_class *cl, int mask)
 350{
 351	q->row_mask[cl->level] |= mask;
 352	while (mask) {
 353		int prio = ffz(~mask);
 354		mask &= ~(1 << prio);
 355		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
 356	}
 357}
 358
 359/* If this triggers, it is a bug in this code, but it need not be fatal */
 360static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
 361{
 362	if (RB_EMPTY_NODE(rb)) {
 363		WARN_ON(1);
 364	} else {
 365		rb_erase(rb, root);
 366		RB_CLEAR_NODE(rb);
 367	}
 368}
 369
 370
 371/**
 372 * htb_remove_class_from_row - removes class from its row
 373 *
 374 * The class is removed from row at priorities marked in mask.
 375 * It does nothing if mask == 0.
 376 */
 377static inline void htb_remove_class_from_row(struct htb_sched *q,
 378						 struct htb_class *cl, int mask)
 379{
 380	int m = 0;
 381	struct htb_level *hlevel = &q->hlevel[cl->level];
 382
 383	while (mask) {
 384		int prio = ffz(~mask);
 385		struct htb_prio *hprio = &hlevel->hprio[prio];
 386
 387		mask &= ~(1 << prio);
 388		if (hprio->ptr == cl->node + prio)
 389			htb_next_rb_node(&hprio->ptr);
 390
 391		htb_safe_rb_erase(cl->node + prio, &hprio->row);
 392		if (!hprio->row.rb_node)
 393			m |= 1 << prio;
 394	}
 395	q->row_mask[cl->level] &= ~m;
 396}
 397
 398/**
  399 * htb_activate_prios - creates the active class's feed chain
  400 *
  401 * The class is connected to ancestors and/or appropriate rows
  402 * for the priorities it is participating in. cl->cmode must be the new
  403 * (activated) mode. It does nothing if cl->prio_activity == 0.
 404 */
 405static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 406{
 407	struct htb_class *p = cl->parent;
 408	long m, mask = cl->prio_activity;
 409
 410	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 411		m = mask;
 412		while (m) {
 413			int prio = ffz(~m);
 414			m &= ~(1 << prio);
 415
 416			if (p->un.inner.clprio[prio].feed.rb_node)
  417				/* parent already has its feed in use so
  418				 * reset the bit in mask as parent is already ok
 419				 */
 420				mask &= ~(1 << prio);
 421
 422			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
 423		}
 424		p->prio_activity |= mask;
 425		cl = p;
 426		p = cl->parent;
 427
 428	}
 429	if (cl->cmode == HTB_CAN_SEND && mask)
 430		htb_add_class_to_row(q, cl, mask);
 431}
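/* Propagation sketch (hypothetical hierarchy, not from the original
 * source): when leaf 1:10 in HTB_MAY_BORROW activates prio 0, it is
 * linked into parent 1:1's feed[0] above; the walk then repeats upward,
 * and the first ancestor that is in HTB_CAN_SEND ends up in the level
 * row via htb_add_class_to_row(), since only classes allowed to send
 * are visible to the dequeue path.
 */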
 432
 433/**
 434 * htb_deactivate_prios - remove class from feed chain
 435 *
  436 * cl->cmode must represent the old mode (before deactivation). It does
  437 * nothing if cl->prio_activity == 0. The class is removed from all feed
  438 * chains and rows.
 439 */
 440static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 441{
 442	struct htb_class *p = cl->parent;
 443	long m, mask = cl->prio_activity;
 444
 445	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 446		m = mask;
 447		mask = 0;
 448		while (m) {
 449			int prio = ffz(~m);
 450			m &= ~(1 << prio);
 451
 452			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
  453				/* we are removing a child which is pointed to from
  454				 * the parent feed - forget the pointer but remember
  455				 * the classid
 456				 */
 457				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
 458				p->un.inner.clprio[prio].ptr = NULL;
 459			}
 460
 461			htb_safe_rb_erase(cl->node + prio,
 462					  &p->un.inner.clprio[prio].feed);
 463
 464			if (!p->un.inner.clprio[prio].feed.rb_node)
 465				mask |= 1 << prio;
 466		}
 467
 468		p->prio_activity &= ~mask;
 469		cl = p;
 470		p = cl->parent;
 471
 472	}
 473	if (cl->cmode == HTB_CAN_SEND && mask)
 474		htb_remove_class_from_row(q, cl, mask);
 475}
 476
 477static inline s64 htb_lowater(const struct htb_class *cl)
 478{
 479	if (htb_hysteresis)
 480		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 481	else
 482		return 0;
 483}
 484static inline s64 htb_hiwater(const struct htb_class *cl)
 485{
 486	if (htb_hysteresis)
 487		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
 488	else
 489		return 0;
 490}
 491
 492
  493/**
  494 * htb_class_mode - computes and returns current class mode
  495 *
  496 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
  497 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
  498 * from now to the time when cl will change its state.
  499 * It is also worth noting that class mode doesn't change simply
  500 * at cl->{c,}tokens == 0; rather, there can be hysteresis over the
  501 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
  502 * mode transitions per time unit. The speed gain is about 1/6.
  503 */
 504static inline enum htb_cmode
 505htb_class_mode(struct htb_class *cl, s64 *diff)
 506{
 507	s64 toks;
 508
 509	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 510		*diff = -toks;
 511		return HTB_CANT_SEND;
 512	}
 513
 514	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
 515		return HTB_CAN_SEND;
 516
 517	*diff = -toks;
 518	return HTB_MAY_BORROW;
 519}
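/* Worked example (a sketch under assumed token values, not part of the
 * original code): with tokens = -1000 ns, ctokens = 5000 ns and
 * buffer = 2000 ns, a class currently in HTB_CAN_SEND stays in
 * HTB_CAN_SEND while htb_hysteresis is on (tokens >= -buffer), but
 * drops to HTB_MAY_BORROW as soon as tokens go negative when
 * hysteresis is off.
 */
static enum htb_cmode __maybe_unused
htb_example_mode_check(struct htb_class *cl)
{
	s64 diff = 0;

	return htb_class_mode(cl, &diff);
}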
 520
  521/**
  522 * htb_change_class_mode - changes class's mode
  523 *
  524 * This should be the only way to change a class's mode under normal
  525 * circumstances. The routine will update feed list linkage, change mode
  526 * and add the class to the wait event queue if appropriate. The new mode
  527 * should be different from the old one and cl->pq_key has to be valid if
  528 * changing to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
  529 */
 530static void
 531htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
 532{
 533	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 534
 535	if (new_mode == cl->cmode)
 536		return;
 537
 538	if (new_mode == HTB_CANT_SEND)
 539		cl->overlimits++;
 540
 541	if (cl->prio_activity) {	/* not necessary: speed optimization */
 542		if (cl->cmode != HTB_CANT_SEND)
 543			htb_deactivate_prios(q, cl);
 544		cl->cmode = new_mode;
 545		if (new_mode != HTB_CANT_SEND)
 546			htb_activate_prios(q, cl);
 547	} else
 548		cl->cmode = new_mode;
 549}
 550
 551/**
 552 * htb_activate - inserts leaf cl into appropriate active feeds
 553 *
  554 * The routine learns the (new) priority of the leaf and activates the
  555 * feed chain for that prio. It can safely be called on an already
  556 * active leaf. It also adds the leaf into the droplist.
 557 */
 558static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 559{
 560	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 561
 562	if (!cl->prio_activity) {
 563		cl->prio_activity = 1 << cl->prio;
 564		htb_activate_prios(q, cl);
 565		list_add_tail(&cl->un.leaf.drop_list,
 566			      q->drops + cl->prio);
 567	}
 568}
 569
 570/**
 571 * htb_deactivate - remove leaf cl from active feeds
 572 *
  573 * Make sure that the leaf is active. In other words, it can't be called
  574 * with a non-active leaf. It also removes the class from the drop list.
 575 */
 576static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 577{
 578	WARN_ON(!cl->prio_activity);
 579
 580	htb_deactivate_prios(q, cl);
 581	cl->prio_activity = 0;
 582	list_del_init(&cl->un.leaf.drop_list);
 583}
 584
 585static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 586			     struct qdisc_skb_head *qh)
 587{
 588	struct sk_buff *last = qh->tail;
 589
 590	if (last) {
 591		skb->next = NULL;
 592		last->next = skb;
 593		qh->tail = skb;
 594	} else {
 595		qh->tail = skb;
 596		qh->head = skb;
 597	}
 598	qh->qlen++;
 599}
 600
 601static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 602		       struct sk_buff **to_free)
 603{
 604	int uninitialized_var(ret);
 605	struct htb_sched *q = qdisc_priv(sch);
 606	struct htb_class *cl = htb_classify(skb, sch, &ret);
 607
 608	if (cl == HTB_DIRECT) {
 609		/* enqueue to helper queue */
 610		if (q->direct_queue.qlen < q->direct_qlen) {
 611			htb_enqueue_tail(skb, sch, &q->direct_queue);
 612			q->direct_pkts++;
 613		} else {
 614			return qdisc_drop(skb, sch, to_free);
 615		}
 616#ifdef CONFIG_NET_CLS_ACT
 617	} else if (!cl) {
 618		if (ret & __NET_XMIT_BYPASS)
 619			qdisc_qstats_drop(sch);
 620		__qdisc_drop(skb, to_free);
 621		return ret;
 622#endif
 623	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
 624					to_free)) != NET_XMIT_SUCCESS) {
 625		if (net_xmit_drop_count(ret)) {
 626			qdisc_qstats_drop(sch);
 627			cl->drops++;
 628		}
 629		return ret;
 630	} else {
 631		htb_activate(q, cl);
 632	}
 633
 634	qdisc_qstats_backlog_inc(sch, skb);
 635	sch->q.qlen++;
 636	return NET_XMIT_SUCCESS;
 637}
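/* Behavior note (assuming the common default setup): if defcls is 0 or
 * points at an inner class, htb_classify() returns HTB_DIRECT for every
 * unclassified packet, so such traffic bypasses shaping entirely and is
 * bounded only by q->direct_qlen in the branch above.
 */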
 638
 639static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 640{
 641	s64 toks = diff + cl->tokens;
 642
 643	if (toks > cl->buffer)
 644		toks = cl->buffer;
 645	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
 646	if (toks <= -cl->mbuffer)
 647		toks = 1 - cl->mbuffer;
 648
 649	cl->tokens = toks;
 650}
 651
 652static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 653{
 654	s64 toks = diff + cl->ctokens;
 655
 656	if (toks > cl->cbuffer)
 657		toks = cl->cbuffer;
 658	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
 659	if (toks <= -cl->mbuffer)
 660		toks = 1 - cl->mbuffer;
 661
 662	cl->ctokens = toks;
 663}
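/* Sketch (hypothetical helper, not in the original code): the token
 * cost subtracted above is just the link-layer transmit time of the
 * packet at the configured rate, e.g. 1500 bytes at 1 Mbit/s costs
 * 1500 * 8 / 1e6 s = 12 ms = 12,000,000 ns of tokens.
 */
static s64 __maybe_unused htb_example_pkt_cost(struct htb_class *cl,
					       int bytes)
{
	return (s64) psched_l2t_ns(&cl->rate, bytes);
}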
 664
 665/**
 666 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 667 *
  668 * Routine assumes that a packet "bytes" long was dequeued from leaf cl
  669 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
  670 * the leaf and all ancestors and to the rate bucket for ancestors at levels
  671 * "level" and higher. It also handles a possible change of mode resulting
  672 * from the update. Note that mode can also increase here (MAY_BORROW to
  673 * CAN_SEND) because we can use a more precise clock than the event queue.
  674 * In such a case we remove the class from the event queue first.
 675 */
 676static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 677			     int level, struct sk_buff *skb)
 678{
 679	int bytes = qdisc_pkt_len(skb);
 680	enum htb_cmode old_mode;
 681	s64 diff;
 682
 683	while (cl) {
 684		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 685		if (cl->level >= level) {
 686			if (cl->level == level)
 687				cl->xstats.lends++;
 688			htb_accnt_tokens(cl, bytes, diff);
 689		} else {
 690			cl->xstats.borrows++;
 691			cl->tokens += diff;	/* we moved t_c; update tokens */
 692		}
 693		htb_accnt_ctokens(cl, bytes, diff);
 694		cl->t_c = q->now;
 695
 696		old_mode = cl->cmode;
 697		diff = 0;
 698		htb_change_class_mode(q, cl, &diff);
 699		if (old_mode != cl->cmode) {
 700			if (old_mode != HTB_CAN_SEND)
 701				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 702			if (cl->cmode != HTB_CAN_SEND)
 703				htb_add_to_wait_tree(q, cl, diff);
 704		}
 705
 706		/* update basic stats except for leaves which are already updated */
 707		if (cl->level)
 708			bstats_update(&cl->bstats, skb);
 709
 710		cl = cl->parent;
 711	}
 712}
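/* Worked example (hypothetical tree): if leaf 1:10 borrowed from its
 * level-1 parent 1:1, htb_charge_class(q, leaf, 1, skb) charges ceil
 * tokens on both 1:10 and 1:1, but rate tokens only on 1:1 and above;
 * 1:10 itself merely records the borrow in xstats and refreshes its
 * tokens for the moved checkpoint t_c.
 */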
 713
 714/**
 715 * htb_do_events - make mode changes to classes at the level
 716 *
  717 * Scans the event queue for pending events and applies them. Returns the
  718 * time of the next pending event (0 if none in pq, q->now if too many).
  719 * Note: Only events with cl->pq_key <= q->now are applied.
 720 */
 721static s64 htb_do_events(struct htb_sched *q, const int level,
 722			 unsigned long start)
 723{
 724	/* don't run for longer than 2 jiffies; 2 is used instead of
 725	 * 1 to simplify things when jiffy is going to be incremented
 726	 * too soon
 727	 */
 728	unsigned long stop_at = start + 2;
 729	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
 730
 731	while (time_before(jiffies, stop_at)) {
 732		struct htb_class *cl;
 733		s64 diff;
 734		struct rb_node *p = rb_first(wait_pq);
 735
 736		if (!p)
 737			return 0;
 738
 739		cl = rb_entry(p, struct htb_class, pq_node);
 740		if (cl->pq_key > q->now)
 741			return cl->pq_key;
 742
 743		htb_safe_rb_erase(p, wait_pq);
 744		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 745		htb_change_class_mode(q, cl, &diff);
 746		if (cl->cmode != HTB_CAN_SEND)
 747			htb_add_to_wait_tree(q, cl, diff);
 748	}
 749
 750	/* too much load - let's continue after a break for scheduling */
 751	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
 752		pr_warn("htb: too many events!\n");
 753		q->warned |= HTB_WARN_TOOMANYEVENTS;
 754	}
 755
 756	return q->now;
 757}
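/* Caller contract sketch: htb_dequeue() below treats a 0 return as "no
 * event pending on this level" (and re-arms near_ev_cache one second
 * out), a future pq_key as the next watchdog candidate, and q->now
 * itself as "too much work - yield and reschedule via the work queue".
 */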
 758
  759/* Returns class->node+prio from the id-tree where the class's id is >= id,
  760 * or NULL if no such one exists.
  761 */
 762static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 763					      u32 id)
 764{
 765	struct rb_node *r = NULL;
 766	while (n) {
 767		struct htb_class *cl =
 768		    rb_entry(n, struct htb_class, node[prio]);
 769
 770		if (id > cl->common.classid) {
 771			n = n->rb_right;
 772		} else if (id < cl->common.classid) {
 773			r = n;
 774			n = n->rb_left;
 775		} else {
 776			return n;
 777		}
 778	}
 779	return r;
 780}
 781
 782/**
 783 * htb_lookup_leaf - returns next leaf class in DRR order
 784 *
  785 * Find the leaf that the current feed pointer points to.
 786 */
  787static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
  788{
 789	int i;
 790	struct {
 791		struct rb_node *root;
 792		struct rb_node **pptr;
 793		u32 *pid;
 794	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 795
 796	BUG_ON(!hprio->row.rb_node);
 797	sp->root = hprio->row.rb_node;
 798	sp->pptr = &hprio->ptr;
 799	sp->pid = &hprio->last_ptr_id;
 800
 801	for (i = 0; i < 65535; i++) {
 802		if (!*sp->pptr && *sp->pid) {
 803			/* ptr was invalidated but id is valid - try to recover
 804			 * the original or next ptr
 805			 */
 806			*sp->pptr =
 807			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 808		}
  809		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
 810				 * can become out of date quickly
 811				 */
 812		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 813			*sp->pptr = sp->root;
 814			while ((*sp->pptr)->rb_left)
 815				*sp->pptr = (*sp->pptr)->rb_left;
 816			if (sp > stk) {
 817				sp--;
 818				if (!*sp->pptr) {
 819					WARN_ON(1);
 820					return NULL;
 821				}
 822				htb_next_rb_node(sp->pptr);
 823			}
 824		} else {
 825			struct htb_class *cl;
 826			struct htb_prio *clp;
 827
 828			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 829			if (!cl->level)
 830				return cl;
 831			clp = &cl->un.inner.clprio[prio];
 832			(++sp)->root = clp->feed.rb_node;
 833			sp->pptr = &clp->ptr;
 834			sp->pid = &clp->last_ptr_id;
 835		}
 836	}
 837	WARN_ON(1);
 838	return NULL;
 839}
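/* Recovery sketch: when a class is deleted while it is the active feed
 * pointer, htb_deactivate_prios() leaves ptr NULL but stores the
 * classid in last_ptr_id; the loop above then resumes the round robin
 * with htb_id_find_next_upper() at the next classid >= the remembered
 * one instead of restarting from the leftmost leaf.
 */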
 840
 841/* dequeues packet at given priority and level; call only if
  842 * you are sure that there is an active class at prio/level
 843 */
 844static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
 845					const int level)
 846{
 847	struct sk_buff *skb = NULL;
 848	struct htb_class *cl, *start;
 849	struct htb_level *hlevel = &q->hlevel[level];
 850	struct htb_prio *hprio = &hlevel->hprio[prio];
 851
 852	/* look initial class up in the row */
  853	start = cl = htb_lookup_leaf(hprio, prio);
  854
 855	do {
 856next:
 857		if (unlikely(!cl))
 858			return NULL;
 859
  860		/* class can be empty - it is unlikely but can be true if the leaf
  861		 * qdisc drops packets in its enqueue routine or if someone used
  862		 * the graft operation on the leaf since the last dequeue;
  863		 * simply deactivate and skip such a class
 864		 */
 865		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
 866			struct htb_class *next;
 867			htb_deactivate(q, cl);
 868
 869			/* row/level might become empty */
 870			if ((q->row_mask[level] & (1 << prio)) == 0)
 871				return NULL;
 872
  873			next = htb_lookup_leaf(hprio, prio);
  874
 875			if (cl == start)	/* fix start if we just deleted it */
 876				start = next;
 877			cl = next;
 878			goto next;
 879		}
 880
 881		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
 882		if (likely(skb != NULL))
 883			break;
 884
 885		qdisc_warn_nonwc("htb", cl->un.leaf.q);
 886		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
 887					 &q->hlevel[0].hprio[prio].ptr);
  888		cl = htb_lookup_leaf(hprio, prio);
  889
 890	} while (cl != start);
 891
 892	if (likely(skb != NULL)) {
 893		bstats_update(&cl->bstats, skb);
 894		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
 895		if (cl->un.leaf.deficit[level] < 0) {
 896			cl->un.leaf.deficit[level] += cl->quantum;
 897			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
 898						 &q->hlevel[0].hprio[prio].ptr);
 899		}
  900		/* this used to be after charge_class but this ordering
 901		 * gives us slightly better performance
 902		 */
 903		if (!cl->un.leaf.q->q.qlen)
 904			htb_deactivate(q, cl);
 905		htb_charge_class(q, cl, level, skb);
 906	}
 907	return skb;
 908}
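/* DRR sketch (assumed numbers, hypothetical helper mirroring the
 * deficit update above): a leaf with quantum 1000 that dequeues a
 * 1500 byte packet sees its deficit fall to -1500, gets one quantum
 * back (-500), and the round-robin pointer advances, so other leaves
 * at this prio get to send before this one is visited again.
 */
static int __maybe_unused htb_example_drr_step(int deficit, int quantum,
					       int pkt_len)
{
	deficit -= pkt_len;
	if (deficit < 0)
		deficit += quantum;	/* and the RR pointer advances */
	return deficit;
}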
 909
 910static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 911{
 912	struct sk_buff *skb;
 913	struct htb_sched *q = qdisc_priv(sch);
 914	int level;
 915	s64 next_event;
 916	unsigned long start_at;
 917
 918	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 919	skb = __qdisc_dequeue_head(&q->direct_queue);
 920	if (skb != NULL) {
 921ok:
 922		qdisc_bstats_update(sch, skb);
 923		qdisc_qstats_backlog_dec(sch, skb);
 924		sch->q.qlen--;
 925		return skb;
 926	}
 927
 928	if (!sch->q.qlen)
 929		goto fin;
 930	q->now = ktime_get_ns();
 931	start_at = jiffies;
 932
 933	next_event = q->now + 5LLU * NSEC_PER_SEC;
 934
 935	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 936		/* common case optimization - skip event handler quickly */
 937		int m;
 938		s64 event = q->near_ev_cache[level];
 939
 940		if (q->now >= event) {
 941			event = htb_do_events(q, level, start_at);
 942			if (!event)
 943				event = q->now + NSEC_PER_SEC;
 944			q->near_ev_cache[level] = event;
  945		}
  946
 947		if (next_event > event)
 948			next_event = event;
 949
 950		m = ~q->row_mask[level];
 951		while (m != (int)(-1)) {
 952			int prio = ffz(m);
 953
 954			m |= 1 << prio;
 955			skb = htb_dequeue_tree(q, prio, level);
 956			if (likely(skb != NULL))
 957				goto ok;
 958		}
 959	}
 960	qdisc_qstats_overlimit(sch);
 961	if (likely(next_event > q->now))
 962		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
 963	else
 964		schedule_work(&q->work);
 965fin:
 966	return skb;
 967}
  968
 969/* reset all classes */
  970/* always called under BH & queue lock */
 971static void htb_reset(struct Qdisc *sch)
 972{
 973	struct htb_sched *q = qdisc_priv(sch);
  974	struct htb_class *cl;
  975	unsigned int i;
 976
 977	for (i = 0; i < q->clhash.hashsize; i++) {
 978		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 979			if (cl->level)
 980				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
 981			else {
 982				if (cl->un.leaf.q)
 983					qdisc_reset(cl->un.leaf.q);
 984				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
 985			}
 986			cl->prio_activity = 0;
  987			cl->cmode = HTB_CAN_SEND;
  988		}
 989	}
 990	qdisc_watchdog_cancel(&q->watchdog);
 991	__qdisc_reset_queue(&q->direct_queue);
 992	sch->q.qlen = 0;
 993	sch->qstats.backlog = 0;
 994	memset(q->hlevel, 0, sizeof(q->hlevel));
  995	memset(q->row_mask, 0, sizeof(q->row_mask));
  996	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 997		INIT_LIST_HEAD(q->drops + i);
 998}
 999
1000static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1001	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
1002	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
1003	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1004	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1005	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
1006	[TCA_HTB_RATE64] = { .type = NLA_U64 },
1007	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
1008};
1009
1010static void htb_work_func(struct work_struct *work)
1011{
1012	struct htb_sched *q = container_of(work, struct htb_sched, work);
1013	struct Qdisc *sch = q->watchdog.qdisc;
1014
1015	rcu_read_lock();
1016	__netif_schedule(qdisc_root(sch));
1017	rcu_read_unlock();
1018}
1019
1020static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1021		    struct netlink_ext_ack *extack)
1022{
1023	struct htb_sched *q = qdisc_priv(sch);
1024	struct nlattr *tb[TCA_HTB_MAX + 1];
1025	struct tc_htb_glob *gopt;
1026	int err;
1027	int i;
1028
1029	qdisc_watchdog_init(&q->watchdog, sch);
1030	INIT_WORK(&q->work, htb_work_func);
1031
1032	if (!opt)
1033		return -EINVAL;
1034
1035	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1036	if (err)
1037		return err;
1038
1039	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1040	if (err < 0)
1041		return err;
1042
 1043	if (!tb[TCA_HTB_INIT])
 1044		return -EINVAL;
1045
1046	gopt = nla_data(tb[TCA_HTB_INIT]);
 1047	if (gopt->version != HTB_VER >> 16)
 1048		return -EINVAL;
 1049
1050	err = qdisc_class_hash_init(&q->clhash);
1051	if (err < 0)
1052		return err;
1053	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1054		INIT_LIST_HEAD(q->drops + i);
1055
 1056	qdisc_skb_head_init(&q->direct_queue);
 1057
1058	if (tb[TCA_HTB_DIRECT_QLEN])
1059		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1060	else
1061		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1062
1063	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1064		q->rate2quantum = 1;
1065	q->defcls = gopt->defcls;
1066
1067	return 0;
1068}
1069
1070static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 1071{
 1072	struct htb_sched *q = qdisc_priv(sch);
1073	struct nlattr *nest;
1074	struct tc_htb_glob gopt;
1075
 1076	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
 1077	 * no change can happen to the qdisc parameters.
 1078	 */
1079
1080	gopt.direct_pkts = q->direct_pkts;
1081	gopt.version = HTB_VER;
1082	gopt.rate2quantum = q->rate2quantum;
1083	gopt.defcls = q->defcls;
1084	gopt.debug = 0;
1085
1086	nest = nla_nest_start(skb, TCA_OPTIONS);
1087	if (nest == NULL)
1088		goto nla_put_failure;
1089	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1090	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
 1091		goto nla_put_failure;
 1092
 1093	return nla_nest_end(skb, nest);
 1094
 1095nla_put_failure:
 1096	nla_nest_cancel(skb, nest);
1097	return -1;
1098}
1099
1100static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1101			  struct sk_buff *skb, struct tcmsg *tcm)
1102{
 1103	struct htb_class *cl = (struct htb_class *)arg;
 1104	struct nlattr *nest;
1105	struct tc_htb_opt opt;
1106
 1107	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
 1108	 * no change can happen to the class parameters.
 1109	 */
1110	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1111	tcm->tcm_handle = cl->common.classid;
1112	if (!cl->level && cl->un.leaf.q)
1113		tcm->tcm_info = cl->un.leaf.q->handle;
1114
1115	nest = nla_nest_start(skb, TCA_OPTIONS);
1116	if (nest == NULL)
1117		goto nla_put_failure;
1118
1119	memset(&opt, 0, sizeof(opt));
1120
1121	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1122	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1123	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1124	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1125	opt.quantum = cl->quantum;
1126	opt.prio = cl->prio;
1127	opt.level = cl->level;
1128	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1129		goto nla_put_failure;
1130	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1131	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1132			      TCA_HTB_PAD))
1133		goto nla_put_failure;
1134	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1135	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1136			      TCA_HTB_PAD))
1137		goto nla_put_failure;
1138
 1139	return nla_nest_end(skb, nest);
 1140
 1141nla_put_failure:
 1142	nla_nest_cancel(skb, nest);
1143	return -1;
1144}
1145
1146static int
1147htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1148{
1149	struct htb_class *cl = (struct htb_class *)arg;
1150	struct gnet_stats_queue qs = {
1151		.drops = cl->drops,
1152		.overlimits = cl->overlimits,
1153	};
1154	__u32 qlen = 0;
1155
1156	if (!cl->level && cl->un.leaf.q) {
1157		qlen = cl->un.leaf.q->q.qlen;
1158		qs.backlog = cl->un.leaf.q->qstats.backlog;
1159	}
1160	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1161				    INT_MIN, INT_MAX);
1162	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1163				     INT_MIN, INT_MAX);
1164
1165	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1166				  d, NULL, &cl->bstats) < 0 ||
1167	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1168	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1169		return -1;
1170
1171	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1172}
1173
1174static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1175		     struct Qdisc **old, struct netlink_ext_ack *extack)
1176{
1177	struct htb_class *cl = (struct htb_class *)arg;
1178
1179	if (cl->level)
1180		return -EINVAL;
1181	if (new == NULL &&
1182	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1183				     cl->common.classid, extack)) == NULL)
1184		return -ENOBUFS;
1185
 1186	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
 1187	return 0;
1188}
1189
1190static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1191{
1192	struct htb_class *cl = (struct htb_class *)arg;
1193	return !cl->level ? cl->un.leaf.q : NULL;
1194}
1195
1196static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1197{
1198	struct htb_class *cl = (struct htb_class *)arg;
1199
 1200	htb_deactivate(qdisc_priv(sch), cl);
 1201}
1202
1203static inline int htb_parent_last_child(struct htb_class *cl)
1204{
1205	if (!cl->parent)
1206		/* the root class */
1207		return 0;
1208	if (cl->parent->children > 1)
1209		/* not the last child */
1210		return 0;
1211	return 1;
1212}
1213
1214static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1215			       struct Qdisc *new_q)
1216{
1217	struct htb_class *parent = cl->parent;
1218
1219	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1220
1221	if (parent->cmode != HTB_CAN_SEND)
1222		htb_safe_rb_erase(&parent->pq_node,
1223				  &q->hlevel[parent->level].wait_pq);
1224
1225	parent->level = 0;
1226	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1227	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1228	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1229	parent->tokens = parent->buffer;
1230	parent->ctokens = parent->cbuffer;
1231	parent->t_c = ktime_get_ns();
1232	parent->cmode = HTB_CAN_SEND;
1233}
1234
1235static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1236{
1237	if (!cl->level) {
1238		WARN_ON(!cl->un.leaf.q);
1239		qdisc_destroy(cl->un.leaf.q);
1240	}
1241	gen_kill_estimator(&cl->rate_est);
 1242	tcf_block_put(cl->block);
 1243	kfree(cl);
1244}
1245
1246static void htb_destroy(struct Qdisc *sch)
1247{
1248	struct htb_sched *q = qdisc_priv(sch);
1249	struct hlist_node *next;
1250	struct htb_class *cl;
1251	unsigned int i;
1252
1253	cancel_work_sync(&q->work);
1254	qdisc_watchdog_cancel(&q->watchdog);
 1255	/* This line used to be after the htb_destroy_class call below
 1256	 * and surprisingly it worked in 2.4. But it must precede it
 1257	 * because a filter needs its target class alive to be able to call
 1258	 * unbind_filter on it (without an Oops).
 1259	 */
1260	tcf_block_put(q->block);
1261
1262	for (i = 0; i < q->clhash.hashsize; i++) {
1263		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1264			tcf_block_put(cl->block);
1265			cl->block = NULL;
1266		}
1267	}
1268	for (i = 0; i < q->clhash.hashsize; i++) {
1269		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1270					  common.hnode)
1271			htb_destroy_class(sch, cl);
1272	}
1273	qdisc_class_hash_destroy(&q->clhash);
1274	__qdisc_reset_queue(&q->direct_queue);
1275}
1276
1277static int htb_delete(struct Qdisc *sch, unsigned long arg)
1278{
1279	struct htb_sched *q = qdisc_priv(sch);
 1280	struct htb_class *cl = (struct htb_class *)arg;
 1281	struct Qdisc *new_q = NULL;
1282	int last_child = 0;
1283
 1284	/* TODO: why don't we allow deleting a subtree? references? does the
 1285	 * tc subsystem guarantee us that in htb_destroy it holds no class
 1286	 * refs so that we can remove children safely there?
 1287	 */
1288	if (cl->children || cl->filter_cnt)
1289		return -EBUSY;
1290
1291	if (!cl->level && htb_parent_last_child(cl)) {
1292		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1293					  cl->parent->common.classid,
1294					  NULL);
1295		last_child = 1;
1296	}
1297
1298	sch_tree_lock(sch);
1299
1300	if (!cl->level) {
1301		unsigned int qlen = cl->un.leaf.q->q.qlen;
1302		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
1303
1304		qdisc_reset(cl->un.leaf.q);
1305		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
1306	}
1307
1308	/* delete from hash and active; remainder in destroy_class */
1309	qdisc_class_hash_remove(&q->clhash, &cl->common);
1310	if (cl->parent)
1311		cl->parent->children--;
1312
1313	if (cl->prio_activity)
1314		htb_deactivate(q, cl);
1315
1316	if (cl->cmode != HTB_CAN_SEND)
1317		htb_safe_rb_erase(&cl->pq_node,
1318				  &q->hlevel[cl->level].wait_pq);
1319
1320	if (last_child)
1321		htb_parent_to_leaf(q, cl, new_q);
 1322
 1323	sch_tree_unlock(sch);
 1324
1325	htb_destroy_class(sch, cl);
1326	return 0;
1327}
1328
1329static int htb_change_class(struct Qdisc *sch, u32 classid,
1330			    u32 parentid, struct nlattr **tca,
1331			    unsigned long *arg, struct netlink_ext_ack *extack)
1332{
1333	int err = -EINVAL;
1334	struct htb_sched *q = qdisc_priv(sch);
1335	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1336	struct nlattr *opt = tca[TCA_OPTIONS];
 1337	struct nlattr *tb[TCA_HTB_MAX + 1];
 1338	struct tc_htb_opt *hopt;
1339	u64 rate64, ceil64;
1340	int warn = 0;
1341
1342	/* extract all subattrs from opt attr */
1343	if (!opt)
1344		goto failure;
1345
1346	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1347	if (err < 0)
1348		goto failure;
1349
1350	err = -EINVAL;
1351	if (tb[TCA_HTB_PARMS] == NULL)
1352		goto failure;
1353
1354	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1355
1356	hopt = nla_data(tb[TCA_HTB_PARMS]);
 1357	if (!hopt->rate.rate || !hopt->ceil.rate)
 1358		goto failure;
1359
 1360	/* Keep backward compatibility with rate_table based iproute2 tc */
1361	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1362		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1363					      NULL));
1364
1365	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1366		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1367					      NULL));
1368
1369	if (!cl) {		/* new class */
1370		struct Qdisc *new_q;
1371		int prio;
1372		struct {
1373			struct nlattr		nla;
1374			struct gnet_estimator	opt;
1375		} est = {
1376			.nla = {
1377				.nla_len	= nla_attr_size(sizeof(est.opt)),
1378				.nla_type	= TCA_RATE,
1379			},
1380			.opt = {
1381				/* 4s interval, 16s averaging constant */
1382				.interval	= 2,
1383				.ewma_log	= 2,
1384			},
1385		};
1386
1387		/* check for valid classid */
1388		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1389		    htb_find(classid, sch))
1390			goto failure;
1391
1392		/* check maximal depth */
1393		if (parent && parent->parent && parent->parent->level < 2) {
1394			pr_err("htb: tree is too deep\n");
1395			goto failure;
1396		}
1397		err = -ENOBUFS;
1398		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1399		if (!cl)
1400			goto failure;
1401
 1402		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 1403		if (err) {
1404			kfree(cl);
1405			goto failure;
1406		}
1407		if (htb_rate_est || tca[TCA_RATE]) {
1408			err = gen_new_estimator(&cl->bstats, NULL,
1409						&cl->rate_est,
1410						NULL,
1411						qdisc_root_sleeping_running(sch),
1412						tca[TCA_RATE] ? : &est.nla);
1413			if (err) {
1414				tcf_block_put(cl->block);
1415				kfree(cl);
1416				goto failure;
1417			}
1418		}
 1419
 1420		cl->children = 0;
1421		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1422		RB_CLEAR_NODE(&cl->pq_node);
1423
1424		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1425			RB_CLEAR_NODE(&cl->node[prio]);
1426
 1427		/* create the leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 1428		 * so it can't be used inside of sch_tree_lock
 1429		 * -- thanks to Karlis Peisenieks
 1430		 */
1431		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1432					  classid, NULL);
1433		sch_tree_lock(sch);
1434		if (parent && !parent->level) {
1435			unsigned int qlen = parent->un.leaf.q->q.qlen;
1436			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
1437
1438			/* turn parent into inner node */
1439			qdisc_reset(parent->un.leaf.q);
1440			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
1441			qdisc_destroy(parent->un.leaf.q);
1442			if (parent->prio_activity)
1443				htb_deactivate(q, parent);
1444
1445			/* remove from evt list because of level change */
1446			if (parent->cmode != HTB_CAN_SEND) {
1447				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1448				parent->cmode = HTB_CAN_SEND;
1449			}
1450			parent->level = (parent->parent ? parent->parent->level
1451					 : TC_HTB_MAXDEPTH) - 1;
1452			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1453		}
1454		/* leaf (we) needs elementary qdisc */
1455		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1456
1457		cl->common.classid = classid;
1458		cl->parent = parent;
1459
1460		/* set class to be in HTB_CAN_SEND state */
1461		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1462		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1463		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1464		cl->t_c = ktime_get_ns();
1465		cl->cmode = HTB_CAN_SEND;
1466
1467		/* attach to the hash list and parent's family */
1468		qdisc_class_hash_insert(&q->clhash, &cl->common);
1469		if (parent)
1470			parent->children++;
1471		if (cl->un.leaf.q != &noop_qdisc)
1472			qdisc_hash_add(cl->un.leaf.q, true);
1473	} else {
1474		if (tca[TCA_RATE]) {
1475			err = gen_replace_estimator(&cl->bstats, NULL,
1476						    &cl->rate_est,
1477						    NULL,
1478						    qdisc_root_sleeping_running(sch),
1479						    tca[TCA_RATE]);
1480			if (err)
1481				return err;
1482		}
1483		sch_tree_lock(sch);
1484	}
1485
1486	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1487
1488	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1489
1490	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1491	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1492
 1493	/* there used to be a nasty bug here: we have to check that the node
 1494	 * is really a leaf before changing cl->un.leaf!
 1495	 */
1496	if (!cl->level) {
1497		u64 quantum = cl->rate.rate_bytes_ps;
1498
1499		do_div(quantum, q->rate2quantum);
1500		cl->quantum = min_t(u64, quantum, INT_MAX);
1501
1502		if (!hopt->quantum && cl->quantum < 1000) {
 1503			warn = -1;
 1504			cl->quantum = 1000;
1505		}
1506		if (!hopt->quantum && cl->quantum > 200000) {
 1507			warn = 1;
 1508			cl->quantum = 200000;
1509		}
1510		if (hopt->quantum)
1511			cl->quantum = hopt->quantum;
1512		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1513			cl->prio = TC_HTB_NUMPRIO - 1;
1514	}
1515
1516	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1517	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
 1518
 1519	sch_tree_unlock(sch);
1520
1521	if (warn)
1522		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
1523			    cl->common.classid, (warn == -1 ? "small" : "big"));
1524
1525	qdisc_class_hash_grow(sch, &q->clhash);
1526
1527	*arg = (unsigned long)cl;
1528	return 0;
1529
 1530failure:
 1531	return err;
1532}
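/* Worked quantum example (assumed numbers, using iproute2's default
 * r2q of 10): a class rate of 100 Mbit/s is 12,500,000 bytes/s, giving
 * quantum 1,250,000; that exceeds 200000, so it is clamped and the
 * "quantum ... is big" warning above fires unless the user supplies an
 * explicit quantum or tunes r2q.
 */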
1533
1534static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
1535				       struct netlink_ext_ack *extack)
1536{
1537	struct htb_sched *q = qdisc_priv(sch);
 1538	struct htb_class *cl = (struct htb_class *)arg;
 1539
1540	return cl ? cl->block : q->block;
1541}
1542
1543static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1544				     u32 classid)
1545{
1546	struct htb_class *cl = htb_find(classid, sch);
1547
1548	/*if (cl && !cl->level) return 0;
1549	 * The line above used to be there to prevent attaching filters to
 1550	 * leaves. But at least the tc_index filter uses this just to get the
 1551	 * class for other reasons, so we have to allow for it.
1552	 * ----
1553	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1554	 * another way to "lock" the class - unlike "get" this lock can
1555	 * be broken by class during destroy IIUC.
1556	 */
1557	if (cl)
1558		cl->filter_cnt++;
1559	return (unsigned long)cl;
1560}
1561
1562static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1563{
1564	struct htb_class *cl = (struct htb_class *)arg;
1565
1566	if (cl)
1567		cl->filter_cnt--;
1568}
1569
1570static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1571{
1572	struct htb_sched *q = qdisc_priv(sch);
 1573	struct htb_class *cl;
 1574	unsigned int i;
1575
1576	if (arg->stop)
1577		return;
1578
1579	for (i = 0; i < q->clhash.hashsize; i++) {
1580		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1581			if (arg->count < arg->skip) {
1582				arg->count++;
1583				continue;
1584			}
1585			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1586				arg->stop = 1;
1587				return;
1588			}
1589			arg->count++;
1590		}
1591	}
1592}
1593
1594static const struct Qdisc_class_ops htb_class_ops = {
1595	.graft		=	htb_graft,
1596	.leaf		=	htb_leaf,
1597	.qlen_notify	=	htb_qlen_notify,
 1598	.find		=	htb_search,
 1599	.change		=	htb_change_class,
1600	.delete		=	htb_delete,
1601	.walk		=	htb_walk,
1602	.tcf_block	=	htb_tcf_block,
1603	.bind_tcf	=	htb_bind_filter,
1604	.unbind_tcf	=	htb_unbind_filter,
1605	.dump		=	htb_dump_class,
1606	.dump_stats	=	htb_dump_class_stats,
1607};
1608
1609static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1610	.cl_ops		=	&htb_class_ops,
1611	.id		=	"htb",
1612	.priv_size	=	sizeof(struct htb_sched),
1613	.enqueue	=	htb_enqueue,
1614	.dequeue	=	htb_dequeue,
 1615	.peek		=	qdisc_peek_dequeued,
 1616	.init		=	htb_init,
1617	.reset		=	htb_reset,
1618	.destroy	=	htb_destroy,
1619	.dump		=	htb_dump,
1620	.owner		=	THIS_MODULE,
1621};
1622
1623static int __init htb_module_init(void)
1624{
1625	return register_qdisc(&htb_qdisc_ops);
1626}
1627static void __exit htb_module_exit(void)
1628{
1629	unregister_qdisc(&htb_qdisc_ops);
1630}
1631
1632module_init(htb_module_init)
1633module_exit(htb_module_exit)
1634MODULE_LICENSE("GPL");