v3.5.6
 
   1/*
   2 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Martin Devera, <devik@cdi.cz>
  10 *
  11 * Credits (in time order) for older HTB versions:
  12 *              Stef Coene <stef.coene@docum.org>
  13 *			HTB support at LARTC mailing list
  14 *		Ondrej Kraus, <krauso@barr.cz>
  15 *			found missing INIT_QDISC(htb)
  16 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
  17 *			helped a lot to locate nasty class stall bug
  18 *		Andi Kleen, Jamal Hadi, Bert Hubert
  19 *			code review and helpful comments on shaping
  20 *		Tomasz Wrona, <tw@eter.tym.pl>
  21 *			created test case so that I was able to fix nasty bug
  22 *		Wilfried Weissmann
  23 *			spotted bug in dequeue code and helped with fix
  24 *		Jiri Fojtasek
  25 *			fixed requeue routine
  26 *		and many others. thanks.
  27 */
  28#include <linux/module.h>
  29#include <linux/moduleparam.h>
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/string.h>
  33#include <linux/errno.h>
  34#include <linux/skbuff.h>
  35#include <linux/list.h>
  36#include <linux/compiler.h>
  37#include <linux/rbtree.h>
  38#include <linux/workqueue.h>
  39#include <linux/slab.h>
  40#include <net/netlink.h>
  41#include <net/pkt_sched.h>
  42
  43/* HTB algorithm.
  44    Author: devik@cdi.cz
  45    ========================================================================
  46    HTB is like TBF with multiple classes. It is also similar to CBQ because
  47    it allows to assign priority to each class in hierarchy.
  48    In fact it is another implementation of Floyd's formal sharing.
  49
  50    Levels:
   51    Each class is assigned a level. A leaf ALWAYS has level 0 and root
   52    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have level
   53    one less than their parent.
  54*/
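/* Illustrative hierarchy (assuming the usual TC_HTB_MAXDEPTH of 8 and
 * hypothetical class handles):
 *
 *   1:1   root          level 7
 *   +- 1:10   inner     level 6
 *   |  +- 1:101  leaf   level 0
 *   |  +- 1:102  leaf   level 0
 *   +- 1:20   leaf      level 0
 *
 * Leaves always sit at level 0; inner nodes count down from the root.
 */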
  55
  56static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
   57#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
  58
  59#if HTB_VER >> 16 != TC_HTB_PROTOVER
  60#error "Mismatched sch_htb.c and pkt_sch.h"
  61#endif
  62
  63/* Module parameter and sysfs export */
  64module_param    (htb_hysteresis, int, 0640);
  65MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
  66
   67/* used internally to keep status of single class */
  68enum htb_cmode {
  69	HTB_CANT_SEND,		/* class can't send and can't borrow */
  70	HTB_MAY_BORROW,		/* class can't send but may borrow */
  71	HTB_CAN_SEND		/* class can send */
  72};
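/* Rough sketch of the usual transitions (see htb_class_mode() below): a
 * class that drains its own rate bucket goes HTB_CAN_SEND -> HTB_MAY_BORROW
 * and starts borrowing from its ancestors; once its ceil bucket is empty as
 * well it drops to HTB_CANT_SEND and waits on the event queue until enough
 * tokens accumulate again.
 */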
  73
  74/* interior & leaf nodes; props specific to leaves are marked L: */
  75struct htb_class {
  76	struct Qdisc_class_common common;
  77	/* general class parameters */
  78	struct gnet_stats_basic_packed bstats;
  79	struct gnet_stats_queue qstats;
  80	struct gnet_stats_rate_est rate_est;
  81	struct tc_htb_xstats xstats;	/* our special stats */
  82	int refcnt;		/* usage count of this class */
  83
  84	/* topology */
  85	int level;		/* our level (see above) */
  86	unsigned int children;
  87	struct htb_class *parent;	/* parent class */
  88
  89	int prio;		/* these two are used only by leaves... */
  90	int quantum;		/* but stored for parent-to-leaf return */
 
  91
  92	union {
  93		struct htb_class_leaf {
  94			struct Qdisc *q;
  95			int deficit[TC_HTB_MAXDEPTH];
  96			struct list_head drop_list;
  97		} leaf;
  98		struct htb_class_inner {
  99			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
 100			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
 101			/* When class changes from state 1->2 and disconnects from
 102			 * parent's feed then we lost ptr value and start from the
 103			 * first child again. Here we store classid of the
 104			 * last valid ptr (used when ptr is NULL).
 105			 */
 106			u32 last_ptr_id[TC_HTB_NUMPRIO];
 107		} inner;
 108	} un;
 109	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 110	struct rb_node pq_node;	/* node for event queue */
 111	psched_time_t pq_key;
 112
 113	int prio_activity;	/* for which prios are we active */
 114	enum htb_cmode cmode;	/* current mode of the class */
 115
 116	/* class attached filters */
 117	struct tcf_proto *filter_list;
 118	int filter_cnt;
 119
 120	/* token bucket parameters */
 121	struct qdisc_rate_table *rate;	/* rate table of the class itself */
 122	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
 123	long buffer, cbuffer;	/* token bucket depth/rate */
 124	psched_tdiff_t mbuffer;	/* max wait time */
 125	long tokens, ctokens;	/* current number of tokens */
 126	psched_time_t t_c;	/* checkpoint time */
 127};
 128
 129struct htb_sched {
 130	struct Qdisc_class_hash clhash;
 131	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 
 132
 133	/* self list - roots of self generating tree */
 134	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 135	int row_mask[TC_HTB_MAXDEPTH];
 136	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 137	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
 138
 139	/* self wait list - roots of wait PQs per row */
 140	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 141
 142	/* time of nearest event per level (row) */
 143	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
 144
 145	int defcls;		/* class where unclassified flows go to */
 146
 147	/* filters for qdisc itself */
 148	struct tcf_proto *filter_list;
 149
 150	int rate2quantum;	/* quant = rate / rate2quantum */
 151	psched_time_t now;	/* cached dequeue time */
 152	struct qdisc_watchdog watchdog;
 153
 154	/* non shaped skbs; let them go directly thru */
 155	struct sk_buff_head direct_queue;
 156	int direct_qlen;	/* max qlen of above */
 157
 158	long direct_pkts;
 159
 160#define HTB_WARN_TOOMANYEVENTS	0x1
 161	unsigned int warned;	/* only one warning */
 162	struct work_struct work;
 
 163};
 164
 165/* find class in global hash table using given handle */
 166static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 167{
 168	struct htb_sched *q = qdisc_priv(sch);
 169	struct Qdisc_class_common *clc;
 170
 171	clc = qdisc_class_find(&q->clhash, handle);
 172	if (clc == NULL)
 173		return NULL;
 174	return container_of(clc, struct htb_class, common);
 175}
 176
 177/**
 178 * htb_classify - classify a packet into class
 179 *
 180 * It returns NULL if the packet should be dropped or -1 if the packet
 181 * should be passed directly thru. In all other cases leaf class is returned.
  182 * We allow direct class selection by classid in priority. Then we examine
 183 * filters in qdisc and in inner nodes (if higher filter points to the inner
 184 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 185 * internal fifo (direct). These packets then go directly thru. If we still
  186 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 187 * then finish and return direct queue.
 188 */
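/* Worked example with hypothetical handles: for qdisc 1: with default class
 * 1:20, a packet with skb->priority == 1:0 takes the direct queue, one with
 * skb->priority set to an existing leaf such as 1:10 is enqueued to that
 * leaf without consulting filters, and anything the filters cannot resolve
 * falls back to leaf 1:20.
 */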
 189#define HTB_DIRECT ((struct htb_class *)-1L)
 190
 191static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 192				      int *qerr)
 193{
 194	struct htb_sched *q = qdisc_priv(sch);
 195	struct htb_class *cl;
 196	struct tcf_result res;
 197	struct tcf_proto *tcf;
 198	int result;
 199
 200	/* allow to select class by setting skb->priority to valid classid;
 201	 * note that nfmark can be used too by attaching filter fw with no
 202	 * rules in it
 203	 */
 204	if (skb->priority == sch->handle)
 205		return HTB_DIRECT;	/* X:0 (direct flow) selected */
 206	cl = htb_find(skb->priority, sch);
 207	if (cl && cl->level == 0)
 208		return cl;
 209
 210	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 211	tcf = q->filter_list;
 212	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 213#ifdef CONFIG_NET_CLS_ACT
 214		switch (result) {
 215		case TC_ACT_QUEUED:
 216		case TC_ACT_STOLEN:
 217			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 218		case TC_ACT_SHOT:
 219			return NULL;
 220		}
 221#endif
 222		cl = (void *)res.class;
 223		if (!cl) {
 224			if (res.classid == sch->handle)
 225				return HTB_DIRECT;	/* X:0 (direct flow) */
 226			cl = htb_find(res.classid, sch);
 227			if (!cl)
 228				break;	/* filter selected invalid classid */
 229		}
 230		if (!cl->level)
 231			return cl;	/* we hit leaf; return it */
 232
 233		/* we have got inner class; apply inner filter chain */
 234		tcf = cl->filter_list;
 235	}
 236	/* classification failed; try to use default class */
 237	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
 238	if (!cl || cl->level)
 239		return HTB_DIRECT;	/* bad default .. this is safe bet */
 240	return cl;
 241}
 242
 243/**
 244 * htb_add_to_id_tree - adds class to the round robin list
 245 *
 246 * Routine adds class to the list (actually tree) sorted by classid.
 247 * Make sure that class is not already on such list for given prio.
 248 */
 249static void htb_add_to_id_tree(struct rb_root *root,
 250			       struct htb_class *cl, int prio)
 251{
 252	struct rb_node **p = &root->rb_node, *parent = NULL;
 253
 254	while (*p) {
 255		struct htb_class *c;
 256		parent = *p;
 257		c = rb_entry(parent, struct htb_class, node[prio]);
 258
 259		if (cl->common.classid > c->common.classid)
 260			p = &parent->rb_right;
 261		else
 262			p = &parent->rb_left;
 263	}
 264	rb_link_node(&cl->node[prio], parent, p);
 265	rb_insert_color(&cl->node[prio], root);
 266}
 267
 268/**
 269 * htb_add_to_wait_tree - adds class to the event queue with delay
 270 *
 271 * The class is added to priority event queue to indicate that class will
 272 * change its mode in cl->pq_key microseconds. Make sure that class is not
 273 * already in the queue.
 274 */
 275static void htb_add_to_wait_tree(struct htb_sched *q,
 276				 struct htb_class *cl, long delay)
 277{
 278	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 279
 280	cl->pq_key = q->now + delay;
 281	if (cl->pq_key == q->now)
 282		cl->pq_key++;
 283
 284	/* update the nearest event cache */
 285	if (q->near_ev_cache[cl->level] > cl->pq_key)
 286		q->near_ev_cache[cl->level] = cl->pq_key;
 287
 288	while (*p) {
 289		struct htb_class *c;
 290		parent = *p;
 291		c = rb_entry(parent, struct htb_class, pq_node);
 292		if (cl->pq_key >= c->pq_key)
 293			p = &parent->rb_right;
 294		else
 295			p = &parent->rb_left;
 296	}
 297	rb_link_node(&cl->pq_node, parent, p);
 298	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
 299}
 300
 301/**
 302 * htb_next_rb_node - finds next node in binary tree
 
 303 *
 304 * When we are past last key we return NULL.
 305 * Average complexity is 2 steps per call.
 306 */
 307static inline void htb_next_rb_node(struct rb_node **n)
 308{
 309	*n = rb_next(*n);
 310}
 311
 312/**
 313 * htb_add_class_to_row - add class to its row
 314 *
 315 * The class is added to row at priorities marked in mask.
 316 * It does nothing if mask == 0.
 317 */
 318static inline void htb_add_class_to_row(struct htb_sched *q,
 319					struct htb_class *cl, int mask)
 320{
 321	q->row_mask[cl->level] |= mask;
 322	while (mask) {
 323		int prio = ffz(~mask);
 324		mask &= ~(1 << prio);
 325		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
 326	}
 327}
 328
 329/* If this triggers, it is a bug in this code, but it need not be fatal */
 330static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
 331{
 332	if (RB_EMPTY_NODE(rb)) {
 333		WARN_ON(1);
 334	} else {
 335		rb_erase(rb, root);
 336		RB_CLEAR_NODE(rb);
 337	}
 338}
 339
 340
 341/**
 342 * htb_remove_class_from_row - removes class from its row
 343 *
 344 * The class is removed from row at priorities marked in mask.
 345 * It does nothing if mask == 0.
 346 */
 347static inline void htb_remove_class_from_row(struct htb_sched *q,
 348						 struct htb_class *cl, int mask)
 349{
 350	int m = 0;
 
 351
 352	while (mask) {
 353		int prio = ffz(~mask);
 
 354
 355		mask &= ~(1 << prio);
 356		if (q->ptr[cl->level][prio] == cl->node + prio)
 357			htb_next_rb_node(q->ptr[cl->level] + prio);
 358
 359		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
 360		if (!q->row[cl->level][prio].rb_node)
 361			m |= 1 << prio;
 362	}
 363	q->row_mask[cl->level] &= ~m;
 364}
 365
 366/**
  367 * htb_activate_prios - creates active class's feed chain
 368 *
 369 * The class is connected to ancestors and/or appropriate rows
  370 * for priorities it is participating in. cl->cmode must be new
 371 * (activated) mode. It does nothing if cl->prio_activity == 0.
 372 */
 373static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 374{
 375	struct htb_class *p = cl->parent;
 376	long m, mask = cl->prio_activity;
 377
 378	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 379		m = mask;
 380		while (m) {
 381			int prio = ffz(~m);
 382			m &= ~(1 << prio);
 383
 384			if (p->un.inner.feed[prio].rb_node)
 385				/* parent already has its feed in use so that
 386				 * reset bit in mask as parent is already ok
 387				 */
 388				mask &= ~(1 << prio);
 389
 390			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
 391		}
 392		p->prio_activity |= mask;
 393		cl = p;
 394		p = cl->parent;
 395
 396	}
 397	if (cl->cmode == HTB_CAN_SEND && mask)
 398		htb_add_class_to_row(q, cl, mask);
 399}
 400
 401/**
 402 * htb_deactivate_prios - remove class from feed chain
 403 *
 404 * cl->cmode must represent old mode (before deactivation). It does
 405 * nothing if cl->prio_activity == 0. Class is removed from all feed
 406 * chains and rows.
 407 */
 408static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 409{
 410	struct htb_class *p = cl->parent;
 411	long m, mask = cl->prio_activity;
 412
 413	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 414		m = mask;
 415		mask = 0;
 416		while (m) {
 417			int prio = ffz(~m);
 418			m &= ~(1 << prio);
 419
 420			if (p->un.inner.ptr[prio] == cl->node + prio) {
 421				/* we are removing child which is pointed to from
 422				 * parent feed - forget the pointer but remember
 423				 * classid
 424				 */
 425				p->un.inner.last_ptr_id[prio] = cl->common.classid;
 426				p->un.inner.ptr[prio] = NULL;
 427			}
 428
 429			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
 
 430
 431			if (!p->un.inner.feed[prio].rb_node)
 432				mask |= 1 << prio;
 433		}
 434
 435		p->prio_activity &= ~mask;
 436		cl = p;
 437		p = cl->parent;
 438
 439	}
 440	if (cl->cmode == HTB_CAN_SEND && mask)
 441		htb_remove_class_from_row(q, cl, mask);
 442}
 443
 444static inline long htb_lowater(const struct htb_class *cl)
 445{
 446	if (htb_hysteresis)
 447		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 448	else
 449		return 0;
 450}
 451static inline long htb_hiwater(const struct htb_class *cl)
 452{
 453	if (htb_hysteresis)
 454		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
 455	else
 456		return 0;
 457}
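/* Sketch of the effect (thresholds only, no real numbers): with
 * htb_hysteresis enabled a class keeps HTB_CAN_SEND until cl->tokens falls
 * below -cl->buffer instead of below 0, and it only drops to HTB_CANT_SEND
 * once cl->ctokens falls below -cl->cbuffer; the widened thresholds trade
 * some shaping accuracy for fewer mode changes (the ~1/6 speedup mentioned
 * in the htb_class_mode() comment below).
 */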
 458
 459
 460/**
 461 * htb_class_mode - computes and returns current class mode
 462 *
 463 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 464 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 465 * from now to time when cl will change its state.
  466 * It is also worth noting that the class mode doesn't change simply
  467 * at cl->{c,}tokens == 0; rather there can be hysteresis in the
  468 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 469 * mode transitions per time unit. The speed gain is about 1/6.
 470 */
 471static inline enum htb_cmode
 472htb_class_mode(struct htb_class *cl, long *diff)
 473{
 474	long toks;
 475
 476	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 477		*diff = -toks;
 478		return HTB_CANT_SEND;
 479	}
 480
 481	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
 482		return HTB_CAN_SEND;
 483
 484	*diff = -toks;
 485	return HTB_MAY_BORROW;
 486}
 487
 488/**
  489 * htb_change_class_mode - changes class's mode
 490 *
  491 * This should be the only way to change a class's mode under normal
  492 * circumstances. The routine will update feed list linkage, change mode
 493 * and add class to the wait event queue if appropriate. New mode should
 494 * be different from old one and cl->pq_key has to be valid if changing
 495 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 496 */
 497static void
 498htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 499{
 500	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 501
 502	if (new_mode == cl->cmode)
 503		return;
 504
 505	if (cl->prio_activity) {	/* not necessary: speed optimization */
 506		if (cl->cmode != HTB_CANT_SEND)
 507			htb_deactivate_prios(q, cl);
 508		cl->cmode = new_mode;
 509		if (new_mode != HTB_CANT_SEND)
 510			htb_activate_prios(q, cl);
 511	} else
 512		cl->cmode = new_mode;
 513}
 514
 515/**
 516 * htb_activate - inserts leaf cl into appropriate active feeds
 517 *
 518 * Routine learns (new) priority of leaf and activates feed chain
 519 * for the prio. It can be called on already active leaf safely.
 520 * It also adds leaf into droplist.
 521 */
 522static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 523{
 524	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 525
 526	if (!cl->prio_activity) {
 527		cl->prio_activity = 1 << cl->prio;
 528		htb_activate_prios(q, cl);
 529		list_add_tail(&cl->un.leaf.drop_list,
 530			      q->drops + cl->prio);
 531	}
 532}
 533
 534/**
 535 * htb_deactivate - remove leaf cl from active feeds
 536 *
  537 * Make sure that leaf is active. In other words it can't be called
 538 * with non-active leaf. It also removes class from the drop list.
 539 */
 540static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 541{
 542	WARN_ON(!cl->prio_activity);
 543
 544	htb_deactivate_prios(q, cl);
 545	cl->prio_activity = 0;
 546	list_del_init(&cl->un.leaf.drop_list);
 547}
 548
 549static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 550{
 551	int uninitialized_var(ret);
 
 552	struct htb_sched *q = qdisc_priv(sch);
 553	struct htb_class *cl = htb_classify(skb, sch, &ret);
 554
 555	if (cl == HTB_DIRECT) {
 556		/* enqueue to helper queue */
 557		if (q->direct_queue.qlen < q->direct_qlen) {
 558			__skb_queue_tail(&q->direct_queue, skb);
 559			q->direct_pkts++;
 560		} else {
 561			return qdisc_drop(skb, sch);
 562		}
 563#ifdef CONFIG_NET_CLS_ACT
 564	} else if (!cl) {
 565		if (ret & __NET_XMIT_BYPASS)
 566			sch->qstats.drops++;
 567		kfree_skb(skb);
 568		return ret;
 569#endif
 570	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
 
 571		if (net_xmit_drop_count(ret)) {
 572			sch->qstats.drops++;
 573			cl->qstats.drops++;
 574		}
 575		return ret;
 576	} else {
 577		htb_activate(q, cl);
 578	}
 579
 
 580	sch->q.qlen++;
 581	return NET_XMIT_SUCCESS;
 582}
 583
 584static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
 585{
 586	long toks = diff + cl->tokens;
 587
 588	if (toks > cl->buffer)
 589		toks = cl->buffer;
 590	toks -= (long) qdisc_l2t(cl->rate, bytes);
 591	if (toks <= -cl->mbuffer)
 592		toks = 1 - cl->mbuffer;
 593
 594	cl->tokens = toks;
 595}
 596
 597static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
 598{
 599	long toks = diff + cl->ctokens;
 600
 601	if (toks > cl->cbuffer)
 602		toks = cl->cbuffer;
 603	toks -= (long) qdisc_l2t(cl->ceil, bytes);
 604	if (toks <= -cl->mbuffer)
 605		toks = 1 - cl->mbuffer;
 606
 607	cl->ctokens = toks;
 608}
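/* Worked example for the two helpers above (illustrative numbers): with
 * diff = 5000 ticks of accumulated credit, cl->tokens = -2000 and a packet
 * whose qdisc_l2t(cl->rate, bytes) cost is 4000 ticks, the result is
 * min(-2000 + 5000, cl->buffer) - 4000 = -1000 for a large enough
 * cl->buffer, and is never allowed below 1 - cl->mbuffer; ctokens are
 * charged the same way against cl->ceil.
 */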
 609
 610/**
 611 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 612 *
 613 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 614 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 615 * leaf and all ancestors and to rate bucket for ancestors at levels
 616 * "level" and higher. It also handles possible change of mode resulting
 617 * from the update. Note that mode can also increase here (MAY_BORROW to
  618 * CAN_SEND) because we can use a more precise clock than the event queue here.
 619 * In such case we remove class from event queue first.
 620 */
 621static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 622			     int level, struct sk_buff *skb)
 623{
 624	int bytes = qdisc_pkt_len(skb);
 625	enum htb_cmode old_mode;
 626	long diff;
 627
 628	while (cl) {
 629		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
 630		if (cl->level >= level) {
 631			if (cl->level == level)
 632				cl->xstats.lends++;
 633			htb_accnt_tokens(cl, bytes, diff);
 634		} else {
 635			cl->xstats.borrows++;
 636			cl->tokens += diff;	/* we moved t_c; update tokens */
 637		}
 638		htb_accnt_ctokens(cl, bytes, diff);
 639		cl->t_c = q->now;
 640
 641		old_mode = cl->cmode;
 642		diff = 0;
 643		htb_change_class_mode(q, cl, &diff);
 644		if (old_mode != cl->cmode) {
 645			if (old_mode != HTB_CAN_SEND)
 646				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
 647			if (cl->cmode != HTB_CAN_SEND)
 648				htb_add_to_wait_tree(q, cl, diff);
 649		}
 650
 651		/* update basic stats except for leaves which are already updated */
 652		if (cl->level)
 653			bstats_update(&cl->bstats, skb);
 654
 655		cl = cl->parent;
 656	}
 657}
 658
 659/**
 660 * htb_do_events - make mode changes to classes at the level
 661 *
 662 * Scans event queue for pending events and applies them. Returns time of
 663 * next pending event (0 for no event in pq, q->now for too many events).
  664 * Note: Applied are events whose cl->pq_key <= q->now.
 665 */
 666static psched_time_t htb_do_events(struct htb_sched *q, int level,
 667				   unsigned long start)
 668{
 669	/* don't run for longer than 2 jiffies; 2 is used instead of
 670	 * 1 to simplify things when jiffy is going to be incremented
 671	 * too soon
 672	 */
 673	unsigned long stop_at = start + 2;
 674	while (time_before(jiffies, stop_at)) {
 675		struct htb_class *cl;
 676		long diff;
 677		struct rb_node *p = rb_first(&q->wait_pq[level]);
 678
 679		if (!p)
 680			return 0;
 681
 682		cl = rb_entry(p, struct htb_class, pq_node);
 683		if (cl->pq_key > q->now)
 684			return cl->pq_key;
 685
 686		htb_safe_rb_erase(p, q->wait_pq + level);
 687		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
 688		htb_change_class_mode(q, cl, &diff);
 689		if (cl->cmode != HTB_CAN_SEND)
 690			htb_add_to_wait_tree(q, cl, diff);
 691	}
 692
 693	/* too much load - let's continue after a break for scheduling */
 694	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
 695		pr_warning("htb: too many events!\n");
 696		q->warned |= HTB_WARN_TOOMANYEVENTS;
 697	}
 698
 699	return q->now;
 700}
 701
  702/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
  703 * if no such one exists.
 704 */
 705static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 706					      u32 id)
 707{
 708	struct rb_node *r = NULL;
 709	while (n) {
 710		struct htb_class *cl =
 711		    rb_entry(n, struct htb_class, node[prio]);
 712
 713		if (id > cl->common.classid) {
 714			n = n->rb_right;
 715		} else if (id < cl->common.classid) {
 716			r = n;
 717			n = n->rb_left;
 718		} else {
 719			return n;
 720		}
 721	}
 722	return r;
 723}
 724
 725/**
 726 * htb_lookup_leaf - returns next leaf class in DRR order
 727 *
  728 * Find the leaf where the current feed pointer points.
 729 */
 730static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 731					 struct rb_node **pptr, u32 * pid)
 732{
 733	int i;
 734	struct {
 735		struct rb_node *root;
 736		struct rb_node **pptr;
 737		u32 *pid;
 738	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 739
 740	BUG_ON(!tree->rb_node);
 741	sp->root = tree->rb_node;
 742	sp->pptr = pptr;
 743	sp->pid = pid;
 744
 745	for (i = 0; i < 65535; i++) {
 746		if (!*sp->pptr && *sp->pid) {
 747			/* ptr was invalidated but id is valid - try to recover
 748			 * the original or next ptr
 749			 */
 750			*sp->pptr =
 751			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 752		}
 753		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
 754				 * can become out of date quickly
 755				 */
 756		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 757			*sp->pptr = sp->root;
 758			while ((*sp->pptr)->rb_left)
 759				*sp->pptr = (*sp->pptr)->rb_left;
 760			if (sp > stk) {
 761				sp--;
 762				if (!*sp->pptr) {
 763					WARN_ON(1);
 764					return NULL;
 765				}
 766				htb_next_rb_node(sp->pptr);
 767			}
 768		} else {
 769			struct htb_class *cl;
 770			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 771			if (!cl->level)
 772				return cl;
 773			(++sp)->root = cl->un.inner.feed[prio].rb_node;
 774			sp->pptr = cl->un.inner.ptr + prio;
 775			sp->pid = cl->un.inner.last_ptr_id + prio;
 
 776		}
 777	}
 778	WARN_ON(1);
 779	return NULL;
 780}
 781
 782/* dequeues packet at given priority and level; call only if
 783 * you are sure that there is active class at prio/level
 784 */
 785static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 786					int level)
 787{
 788	struct sk_buff *skb = NULL;
 789	struct htb_class *cl, *start;
 790	/* look initial class up in the row */
 791	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
 792				     q->ptr[level] + prio,
 793				     q->last_ptr_id[level] + prio);
 794
 795	do {
 796next:
 797		if (unlikely(!cl))
 798			return NULL;
 799
 800		/* class can be empty - it is unlikely but can be true if leaf
 801		 * qdisc drops packets in enqueue routine or if someone used
 802		 * graft operation on the leaf since last dequeue;
 803		 * simply deactivate and skip such class
 804		 */
 805		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
 806			struct htb_class *next;
 807			htb_deactivate(q, cl);
 808
 809			/* row/level might become empty */
 810			if ((q->row_mask[level] & (1 << prio)) == 0)
 811				return NULL;
 812
 813			next = htb_lookup_leaf(q->row[level] + prio,
 814					       prio, q->ptr[level] + prio,
 815					       q->last_ptr_id[level] + prio);
 816
 817			if (cl == start)	/* fix start if we just deleted it */
 818				start = next;
 819			cl = next;
 820			goto next;
 821		}
 822
 823		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
 824		if (likely(skb != NULL))
 825			break;
 826
 827		qdisc_warn_nonwc("htb", cl->un.leaf.q);
 828		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
 829				  ptr[0]) + prio);
 830		cl = htb_lookup_leaf(q->row[level] + prio, prio,
 831				     q->ptr[level] + prio,
 832				     q->last_ptr_id[level] + prio);
 833
 834	} while (cl != start);
 835
 836	if (likely(skb != NULL)) {
 837		bstats_update(&cl->bstats, skb);
 838		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
 839		if (cl->un.leaf.deficit[level] < 0) {
 840			cl->un.leaf.deficit[level] += cl->quantum;
 841			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
 842					  ptr[0]) + prio);
 843		}
  844		/* this used to be after charge_class but this constellation
 845		 * gives us slightly better performance
 846		 */
 847		if (!cl->un.leaf.q->q.qlen)
 848			htb_deactivate(q, cl);
 849		htb_charge_class(q, cl, level, skb);
 850	}
 851	return skb;
 852}
 853
 854static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 855{
 856	struct sk_buff *skb;
 857	struct htb_sched *q = qdisc_priv(sch);
 858	int level;
 859	psched_time_t next_event;
 860	unsigned long start_at;
 861
 862	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 863	skb = __skb_dequeue(&q->direct_queue);
 864	if (skb != NULL) {
 865ok:
 866		qdisc_bstats_update(sch, skb);
 867		qdisc_unthrottled(sch);
 868		sch->q.qlen--;
 869		return skb;
 870	}
 871
 872	if (!sch->q.qlen)
 873		goto fin;
 874	q->now = psched_get_time();
 875	start_at = jiffies;
 876
 877	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 878
 879	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 880		/* common case optimization - skip event handler quickly */
 881		int m;
 882		psched_time_t event;
 883
 884		if (q->now >= q->near_ev_cache[level]) {
 885			event = htb_do_events(q, level, start_at);
 886			if (!event)
 887				event = q->now + PSCHED_TICKS_PER_SEC;
 888			q->near_ev_cache[level] = event;
 889		} else
 890			event = q->near_ev_cache[level];
 891
 892		if (next_event > event)
 893			next_event = event;
 894
 895		m = ~q->row_mask[level];
 896		while (m != (int)(-1)) {
 897			int prio = ffz(m);
 898
 899			m |= 1 << prio;
 900			skb = htb_dequeue_tree(q, prio, level);
 901			if (likely(skb != NULL))
 902				goto ok;
 903		}
 904	}
 905	sch->qstats.overlimits++;
 906	if (likely(next_event > q->now))
 907		qdisc_watchdog_schedule(&q->watchdog, next_event);
 908	else
 909		schedule_work(&q->work);
 910fin:
 911	return skb;
 912}
 913
  914/* try to drop from each class (by prio) until one succeeds */
 915static unsigned int htb_drop(struct Qdisc *sch)
 916{
 917	struct htb_sched *q = qdisc_priv(sch);
 918	int prio;
 919
 920	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
 921		struct list_head *p;
 922		list_for_each(p, q->drops + prio) {
 923			struct htb_class *cl = list_entry(p, struct htb_class,
 924							  un.leaf.drop_list);
 925			unsigned int len;
 926			if (cl->un.leaf.q->ops->drop &&
 927			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
 928				sch->q.qlen--;
 929				if (!cl->un.leaf.q->q.qlen)
 930					htb_deactivate(q, cl);
 931				return len;
 932			}
 933		}
 934	}
 935	return 0;
 936}
 937
 938/* reset all classes */
  939/* always called under BH & queue lock */
 940static void htb_reset(struct Qdisc *sch)
 941{
 942	struct htb_sched *q = qdisc_priv(sch);
 943	struct htb_class *cl;
 944	struct hlist_node *n;
 945	unsigned int i;
 946
 947	for (i = 0; i < q->clhash.hashsize; i++) {
 948		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
 949			if (cl->level)
 950				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
 951			else {
 952				if (cl->un.leaf.q)
 953					qdisc_reset(cl->un.leaf.q);
 954				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
 955			}
 956			cl->prio_activity = 0;
 957			cl->cmode = HTB_CAN_SEND;
 958
 959		}
 960	}
 961	qdisc_watchdog_cancel(&q->watchdog);
 962	__skb_queue_purge(&q->direct_queue);
 963	sch->q.qlen = 0;
 964	memset(q->row, 0, sizeof(q->row));
 965	memset(q->row_mask, 0, sizeof(q->row_mask));
 966	memset(q->wait_pq, 0, sizeof(q->wait_pq));
 967	memset(q->ptr, 0, sizeof(q->ptr));
 968	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 969		INIT_LIST_HEAD(q->drops + i);
 970}
 971
 972static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
 973	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
 974	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
 975	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 976	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 977};
 978
 979static void htb_work_func(struct work_struct *work)
 980{
 981	struct htb_sched *q = container_of(work, struct htb_sched, work);
 982	struct Qdisc *sch = q->watchdog.qdisc;
 983
 
 984	__netif_schedule(qdisc_root(sch));
 
 985}
 986
 987static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 988{
 989	struct htb_sched *q = qdisc_priv(sch);
 990	struct nlattr *tb[TCA_HTB_INIT + 1];
 991	struct tc_htb_glob *gopt;
 992	int err;
 993	int i;
 994
 995	if (!opt)
 996		return -EINVAL;
 997
 998	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
 999	if (err < 0)
1000		return err;
1001
1002	if (tb[TCA_HTB_INIT] == NULL) {
1003		pr_err("HTB: hey probably you have bad tc tool ?\n");
1004		return -EINVAL;
1005	}
1006	gopt = nla_data(tb[TCA_HTB_INIT]);
1007	if (gopt->version != HTB_VER >> 16) {
1008		pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
1009		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
1010		return -EINVAL;
1011	}
1012
1013	err = qdisc_class_hash_init(&q->clhash);
1014	if (err < 0)
1015		return err;
1016	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1017		INIT_LIST_HEAD(q->drops + i);
1018
1019	qdisc_watchdog_init(&q->watchdog, sch);
1020	INIT_WORK(&q->work, htb_work_func);
1021	skb_queue_head_init(&q->direct_queue);
1022
1023	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1024	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
1025		q->direct_qlen = 2;
1026
1027	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1028		q->rate2quantum = 1;
1029	q->defcls = gopt->defcls;
1030
1031	return 0;
1032}
1033
1034static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1035{
1036	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1037	struct htb_sched *q = qdisc_priv(sch);
1038	struct nlattr *nest;
1039	struct tc_htb_glob gopt;
1040
1041	spin_lock_bh(root_lock);
1042
1043	gopt.direct_pkts = q->direct_pkts;
1044	gopt.version = HTB_VER;
1045	gopt.rate2quantum = q->rate2quantum;
1046	gopt.defcls = q->defcls;
1047	gopt.debug = 0;
1048
1049	nest = nla_nest_start(skb, TCA_OPTIONS);
1050	if (nest == NULL)
1051		goto nla_put_failure;
1052	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
1053		goto nla_put_failure;
1054	nla_nest_end(skb, nest);
1055
1056	spin_unlock_bh(root_lock);
1057	return skb->len;
1058
1059nla_put_failure:
1060	spin_unlock_bh(root_lock);
1061	nla_nest_cancel(skb, nest);
1062	return -1;
1063}
1064
1065static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1066			  struct sk_buff *skb, struct tcmsg *tcm)
1067{
1068	struct htb_class *cl = (struct htb_class *)arg;
1069	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1070	struct nlattr *nest;
1071	struct tc_htb_opt opt;
1072
1073	spin_lock_bh(root_lock);
1074	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1075	tcm->tcm_handle = cl->common.classid;
1076	if (!cl->level && cl->un.leaf.q)
1077		tcm->tcm_info = cl->un.leaf.q->handle;
1078
1079	nest = nla_nest_start(skb, TCA_OPTIONS);
1080	if (nest == NULL)
1081		goto nla_put_failure;
1082
1083	memset(&opt, 0, sizeof(opt));
1084
1085	opt.rate = cl->rate->rate;
1086	opt.buffer = cl->buffer;
1087	opt.ceil = cl->ceil->rate;
1088	opt.cbuffer = cl->cbuffer;
1089	opt.quantum = cl->quantum;
1090	opt.prio = cl->prio;
1091	opt.level = cl->level;
1092	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1093		goto nla_put_failure;
1094
1095	nla_nest_end(skb, nest);
1096	spin_unlock_bh(root_lock);
1097	return skb->len;
1098
1099nla_put_failure:
1100	spin_unlock_bh(root_lock);
1101	nla_nest_cancel(skb, nest);
1102	return -1;
1103}
1104
1105static int
1106htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1107{
1108	struct htb_class *cl = (struct htb_class *)arg;
1109
1110	if (!cl->level && cl->un.leaf.q)
1111		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1112	cl->xstats.tokens = cl->tokens;
1113	cl->xstats.ctokens = cl->ctokens;
1114
1115	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1116	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
1117	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
1118		return -1;
1119
1120	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1121}
1122
1123static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1124		     struct Qdisc **old)
1125{
 
1126	struct htb_class *cl = (struct htb_class *)arg;
1127
1128	if (cl->level)
1129		return -EINVAL;
1130	if (new == NULL &&
1131	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1132				     cl->common.classid)) == NULL)
1133		return -ENOBUFS;
1134
1135	sch_tree_lock(sch);
1136	*old = cl->un.leaf.q;
1137	cl->un.leaf.q = new;
1138	if (*old != NULL) {
1139		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1140		qdisc_reset(*old);
1141	}
1142	sch_tree_unlock(sch);
1143	return 0;
1144}
1145
1146static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1147{
1148	struct htb_class *cl = (struct htb_class *)arg;
1149	return !cl->level ? cl->un.leaf.q : NULL;
1150}
1151
1152static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1153{
1154	struct htb_class *cl = (struct htb_class *)arg;
1155
1156	if (cl->un.leaf.q->q.qlen == 0)
1157		htb_deactivate(qdisc_priv(sch), cl);
1158}
1159
1160static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1161{
1162	struct htb_class *cl = htb_find(classid, sch);
1163	if (cl)
1164		cl->refcnt++;
1165	return (unsigned long)cl;
1166}
1167
1168static inline int htb_parent_last_child(struct htb_class *cl)
1169{
1170	if (!cl->parent)
1171		/* the root class */
1172		return 0;
1173	if (cl->parent->children > 1)
1174		/* not the last child */
1175		return 0;
1176	return 1;
1177}
1178
1179static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1180			       struct Qdisc *new_q)
1181{
 
1182	struct htb_class *parent = cl->parent;
1183
1184	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1185
1186	if (parent->cmode != HTB_CAN_SEND)
1187		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
 
1188
1189	parent->level = 0;
1190	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1191	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1192	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1193	parent->tokens = parent->buffer;
1194	parent->ctokens = parent->cbuffer;
1195	parent->t_c = psched_get_time();
1196	parent->cmode = HTB_CAN_SEND;
1197}
1198
1199static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1200{
1201	if (!cl->level) {
1202		WARN_ON(!cl->un.leaf.q);
1203		qdisc_destroy(cl->un.leaf.q);
1204	}
1205	gen_kill_estimator(&cl->bstats, &cl->rate_est);
1206	qdisc_put_rtab(cl->rate);
1207	qdisc_put_rtab(cl->ceil);
1208
1209	tcf_destroy_chain(&cl->filter_list);
1210	kfree(cl);
1211}
1212
1213static void htb_destroy(struct Qdisc *sch)
1214{
1215	struct htb_sched *q = qdisc_priv(sch);
1216	struct hlist_node *n, *next;
 
1217	struct htb_class *cl;
1218	unsigned int i;
1219
1220	cancel_work_sync(&q->work);
1221	qdisc_watchdog_cancel(&q->watchdog);
1222	/* This line used to be after htb_destroy_class call below
1223	 * and surprisingly it worked in 2.4. But it must precede it
1224	 * because filter need its target class alive to be able to call
1225	 * unbind_filter on it (without Oops).
1226	 */
1227	tcf_destroy_chain(&q->filter_list);
1228
1229	for (i = 0; i < q->clhash.hashsize; i++) {
1230		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1231			tcf_destroy_chain(&cl->filter_list);
1232	}
1233	for (i = 0; i < q->clhash.hashsize; i++) {
1234		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1235					  common.hnode)
1236			htb_destroy_class(sch, cl);
1237	}
1238	qdisc_class_hash_destroy(&q->clhash);
1239	__skb_queue_purge(&q->direct_queue);
1240}
1241
1242static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
1243{
1244	struct htb_sched *q = qdisc_priv(sch);
1245	struct htb_class *cl = (struct htb_class *)arg;
1246	unsigned int qlen;
1247	struct Qdisc *new_q = NULL;
1248	int last_child = 0;
 
1249
 1250	// TODO: why don't we allow deleting a subtree ? references ? does the
 1251	// tc subsys guarantee us that in htb_destroy it holds no class
 1252	// refs so that we can remove children safely there ?
1253	if (cl->children || cl->filter_cnt)
1254		return -EBUSY;
 
1255
1256	if (!cl->level && htb_parent_last_child(cl)) {
1257		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1258					  cl->parent->common.classid);
1259		last_child = 1;
1260	}
1261
1262	sch_tree_lock(sch);
1263
1264	if (!cl->level) {
1265		qlen = cl->un.leaf.q->q.qlen;
1266		qdisc_reset(cl->un.leaf.q);
1267		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
1268	}
1269
1270	/* delete from hash and active; remainder in destroy_class */
1271	qdisc_class_hash_remove(&q->clhash, &cl->common);
1272	if (cl->parent)
1273		cl->parent->children--;
1274
1275	if (cl->prio_activity)
1276		htb_deactivate(q, cl);
1277
1278	if (cl->cmode != HTB_CAN_SEND)
1279		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
 
1280
1281	if (last_child)
1282		htb_parent_to_leaf(q, cl, new_q);
1283
1284	BUG_ON(--cl->refcnt == 0);
1285	/*
1286	 * This shouldn't happen: we "hold" one cops->get() when called
1287	 * from tc_ctl_tclass; the destroy method is done from cops->put().
1288	 */
1289
1290	sch_tree_unlock(sch);
1291	return 0;
1292}
1293
1294static void htb_put(struct Qdisc *sch, unsigned long arg)
1295{
1296	struct htb_class *cl = (struct htb_class *)arg;
1297
1298	if (--cl->refcnt == 0)
1299		htb_destroy_class(sch, cl);
1300}
1301
1302static int htb_change_class(struct Qdisc *sch, u32 classid,
1303			    u32 parentid, struct nlattr **tca,
1304			    unsigned long *arg)
1305{
1306	int err = -EINVAL;
1307	struct htb_sched *q = qdisc_priv(sch);
1308	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 
1309	struct nlattr *opt = tca[TCA_OPTIONS];
1310	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1311	struct nlattr *tb[__TCA_HTB_MAX];
 
1312	struct tc_htb_opt *hopt;
1313
1314	/* extract all subattrs from opt attr */
1315	if (!opt)
1316		goto failure;
1317
1318	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
 
1319	if (err < 0)
1320		goto failure;
1321
1322	err = -EINVAL;
1323	if (tb[TCA_HTB_PARMS] == NULL)
1324		goto failure;
1325
1326	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1327
1328	hopt = nla_data(tb[TCA_HTB_PARMS]);
1329
1330	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1331	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1332	if (!rtab || !ctab)
1333		goto failure;
1334
1335	if (!cl) {		/* new class */
1336		struct Qdisc *new_q;
 
1337		int prio;
1338		struct {
1339			struct nlattr		nla;
1340			struct gnet_estimator	opt;
1341		} est = {
1342			.nla = {
1343				.nla_len	= nla_attr_size(sizeof(est.opt)),
1344				.nla_type	= TCA_RATE,
1345			},
1346			.opt = {
1347				/* 4s interval, 16s averaging constant */
1348				.interval	= 2,
1349				.ewma_log	= 2,
1350			},
1351		};
1352
1353		/* check for valid classid */
1354		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1355		    htb_find(classid, sch))
1356			goto failure;
1357
1358		/* check maximal depth */
1359		if (parent && parent->parent && parent->parent->level < 2) {
1360			pr_err("htb: tree is too deep\n");
1361			goto failure;
1362		}
1363		err = -ENOBUFS;
1364		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1365		if (!cl)
1366			goto failure;
1367
1368		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
1369					qdisc_root_sleeping_lock(sch),
1370					tca[TCA_RATE] ? : &est.nla);
 
1371		if (err) {
1372			kfree(cl);
1373			goto failure;
1374		}
1375
1376		cl->refcnt = 1;
1377		cl->children = 0;
1378		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1379		RB_CLEAR_NODE(&cl->pq_node);
1380
1381		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1382			RB_CLEAR_NODE(&cl->node[prio]);
1383
1384		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 1385		 * so it can't be used inside of sch_tree_lock
1386		 * -- thanks to Karlis Peisenieks
1387		 */
1388		new_q = qdisc_create_dflt(sch->dev_queue,
1389					  &pfifo_qdisc_ops, classid);
1390		sch_tree_lock(sch);
1391		if (parent && !parent->level) {
1392			unsigned int qlen = parent->un.leaf.q->q.qlen;
1393
1394			/* turn parent into inner node */
1395			qdisc_reset(parent->un.leaf.q);
1396			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
1397			qdisc_destroy(parent->un.leaf.q);
1398			if (parent->prio_activity)
1399				htb_deactivate(q, parent);
1400
1401			/* remove from evt list because of level change */
1402			if (parent->cmode != HTB_CAN_SEND) {
1403				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
1404				parent->cmode = HTB_CAN_SEND;
1405			}
1406			parent->level = (parent->parent ? parent->parent->level
1407					 : TC_HTB_MAXDEPTH) - 1;
1408			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1409		}
 
1410		/* leaf (we) needs elementary qdisc */
1411		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1412
1413		cl->common.classid = classid;
1414		cl->parent = parent;
1415
1416		/* set class to be in HTB_CAN_SEND state */
1417		cl->tokens = hopt->buffer;
1418		cl->ctokens = hopt->cbuffer;
1419		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
1420		cl->t_c = psched_get_time();
1421		cl->cmode = HTB_CAN_SEND;
1422
1423		/* attach to the hash list and parent's family */
1424		qdisc_class_hash_insert(&q->clhash, &cl->common);
1425		if (parent)
1426			parent->children++;
1427	} else {
1428		if (tca[TCA_RATE]) {
1429			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
1430						    qdisc_root_sleeping_lock(sch),
1431						    tca[TCA_RATE]);
1432			if (err)
1433				return err;
1434		}
1435		sch_tree_lock(sch);
1436	}
1437
1438	/* it used to be a nasty bug here, we have to check that node
1439	 * is really leaf before changing cl->un.leaf !
1440	 */
1441	if (!cl->level) {
1442		cl->quantum = rtab->rate.rate / q->rate2quantum;
1443		if (!hopt->quantum && cl->quantum < 1000) {
1444			pr_warning(
1445			       "HTB: quantum of class %X is small. Consider r2q change.\n",
1446			       cl->common.classid);
1447			cl->quantum = 1000;
1448		}
1449		if (!hopt->quantum && cl->quantum > 200000) {
1450			pr_warning(
1451			       "HTB: quantum of class %X is big. Consider r2q change.\n",
1452			       cl->common.classid);
1453			cl->quantum = 200000;
1454		}
1455		if (hopt->quantum)
1456			cl->quantum = hopt->quantum;
1457		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1458			cl->prio = TC_HTB_NUMPRIO - 1;
1459	}
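	/* Example of the r2q heuristic above (illustrative numbers): a rate of
	 * 12500000 bytes/s (100 Mbit/s) with the typical rate2quantum of 10
	 * yields quantum 1250000, which is capped to 200000 with the warning;
	 * an explicit quantum from tc bypasses both clamps.
	 */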
1460
1461	cl->buffer = hopt->buffer;
1462	cl->cbuffer = hopt->cbuffer;
1463	if (cl->rate)
1464		qdisc_put_rtab(cl->rate);
1465	cl->rate = rtab;
1466	if (cl->ceil)
1467		qdisc_put_rtab(cl->ceil);
1468	cl->ceil = ctab;
1469	sch_tree_unlock(sch);
1470
1471	qdisc_class_hash_grow(sch, &q->clhash);
1472
1473	*arg = (unsigned long)cl;
1474	return 0;
1475
1476failure:
1477	if (rtab)
1478		qdisc_put_rtab(rtab);
1479	if (ctab)
1480		qdisc_put_rtab(ctab);
1481	return err;
1482}
1483
1484static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
 
1485{
1486	struct htb_sched *q = qdisc_priv(sch);
1487	struct htb_class *cl = (struct htb_class *)arg;
1488	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1489
1490	return fl;
1491}
1492
1493static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1494				     u32 classid)
1495{
1496	struct htb_class *cl = htb_find(classid, sch);
1497
1498	/*if (cl && !cl->level) return 0;
1499	 * The line above used to be there to prevent attaching filters to
1500	 * leaves. But at least tc_index filter uses this just to get class
1501	 * for other reasons so that we have to allow for it.
1502	 * ----
1503	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1504	 * another way to "lock" the class - unlike "get" this lock can
1505	 * be broken by class during destroy IIUC.
1506	 */
1507	if (cl)
1508		cl->filter_cnt++;
1509	return (unsigned long)cl;
1510}
1511
1512static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1513{
1514	struct htb_class *cl = (struct htb_class *)arg;
1515
1516	if (cl)
1517		cl->filter_cnt--;
1518}
1519
1520static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1521{
1522	struct htb_sched *q = qdisc_priv(sch);
1523	struct htb_class *cl;
1524	struct hlist_node *n;
1525	unsigned int i;
1526
1527	if (arg->stop)
1528		return;
1529
1530	for (i = 0; i < q->clhash.hashsize; i++) {
1531		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
1532			if (arg->count < arg->skip) {
1533				arg->count++;
1534				continue;
1535			}
1536			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1537				arg->stop = 1;
1538				return;
1539			}
1540			arg->count++;
1541		}
1542	}
1543}
1544
1545static const struct Qdisc_class_ops htb_class_ops = {
 
1546	.graft		=	htb_graft,
1547	.leaf		=	htb_leaf,
1548	.qlen_notify	=	htb_qlen_notify,
1549	.get		=	htb_get,
1550	.put		=	htb_put,
1551	.change		=	htb_change_class,
1552	.delete		=	htb_delete,
1553	.walk		=	htb_walk,
1554	.tcf_chain	=	htb_find_tcf,
1555	.bind_tcf	=	htb_bind_filter,
1556	.unbind_tcf	=	htb_unbind_filter,
1557	.dump		=	htb_dump_class,
1558	.dump_stats	=	htb_dump_class_stats,
1559};
1560
1561static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1562	.cl_ops		=	&htb_class_ops,
1563	.id		=	"htb",
1564	.priv_size	=	sizeof(struct htb_sched),
1565	.enqueue	=	htb_enqueue,
1566	.dequeue	=	htb_dequeue,
1567	.peek		=	qdisc_peek_dequeued,
1568	.drop		=	htb_drop,
1569	.init		=	htb_init,
 
1570	.reset		=	htb_reset,
1571	.destroy	=	htb_destroy,
1572	.dump		=	htb_dump,
1573	.owner		=	THIS_MODULE,
1574};
 
1575
1576static int __init htb_module_init(void)
1577{
1578	return register_qdisc(&htb_qdisc_ops);
1579}
1580static void __exit htb_module_exit(void)
1581{
1582	unregister_qdisc(&htb_qdisc_ops);
1583}
1584
1585module_init(htb_module_init)
1586module_exit(htb_module_exit)
1587MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
   4 *
   5 * Authors:	Martin Devera, <devik@cdi.cz>
   6 *
   7 * Credits (in time order) for older HTB versions:
   8 *              Stef Coene <stef.coene@docum.org>
   9 *			HTB support at LARTC mailing list
  10 *		Ondrej Kraus, <krauso@barr.cz>
  11 *			found missing INIT_QDISC(htb)
  12 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
  13 *			helped a lot to locate nasty class stall bug
  14 *		Andi Kleen, Jamal Hadi, Bert Hubert
  15 *			code review and helpful comments on shaping
  16 *		Tomasz Wrona, <tw@eter.tym.pl>
  17 *			created test case so that I was able to fix nasty bug
  18 *		Wilfried Weissmann
  19 *			spotted bug in dequeue code and helped with fix
  20 *		Jiri Fojtasek
  21 *			fixed requeue routine
  22 *		and many others. thanks.
  23 */
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <linux/types.h>
  27#include <linux/kernel.h>
  28#include <linux/string.h>
  29#include <linux/errno.h>
  30#include <linux/skbuff.h>
  31#include <linux/list.h>
  32#include <linux/compiler.h>
  33#include <linux/rbtree.h>
  34#include <linux/workqueue.h>
  35#include <linux/slab.h>
  36#include <net/netlink.h>
  37#include <net/sch_generic.h>
  38#include <net/pkt_sched.h>
  39#include <net/pkt_cls.h>
  40
  41/* HTB algorithm.
  42    Author: devik@cdi.cz
  43    ========================================================================
  44    HTB is like TBF with multiple classes. It is also similar to CBQ because
  45    it allows to assign priority to each class in hierarchy.
  46    In fact it is another implementation of Floyd's formal sharing.
  47
  48    Levels:
   49    Each class is assigned a level. A leaf ALWAYS has level 0 and root
   50    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have level
   51    one less than their parent.
  52*/
  53
  54static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
  55#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
  56
  57#if HTB_VER >> 16 != TC_HTB_PROTOVER
  58#error "Mismatched sch_htb.c and pkt_sch.h"
  59#endif
  60
  61/* Module parameter and sysfs export */
  62module_param    (htb_hysteresis, int, 0640);
  63MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
  64
  65static int htb_rate_est = 0; /* htb classes have a default rate estimator */
  66module_param(htb_rate_est, int, 0640);
  67MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
  68
   69/* used internally to keep status of single class */
  70enum htb_cmode {
  71	HTB_CANT_SEND,		/* class can't send and can't borrow */
  72	HTB_MAY_BORROW,		/* class can't send but may borrow */
  73	HTB_CAN_SEND		/* class can send */
  74};
  75
  76struct htb_prio {
  77	union {
  78		struct rb_root	row;
  79		struct rb_root	feed;
  80	};
  81	struct rb_node	*ptr;
  82	/* When class changes from state 1->2 and disconnects from
  83	 * parent's feed then we lost ptr value and start from the
  84	 * first child again. Here we store classid of the
  85	 * last valid ptr (used when ptr is NULL).
  86	 */
  87	u32		last_ptr_id;
  88};
  89
  90/* interior & leaf nodes; props specific to leaves are marked L:
  91 * To reduce false sharing, place mostly read fields at beginning,
  92 * and mostly written ones at the end.
  93 */
  94struct htb_class {
  95	struct Qdisc_class_common common;
  96	struct psched_ratecfg	rate;
  97	struct psched_ratecfg	ceil;
  98	s64			buffer, cbuffer;/* token bucket depth/rate */
  99	s64			mbuffer;	/* max wait time */
 100	u32			prio;		/* these two are used only by leaves... */
 101	int			quantum;	/* but stored for parent-to-leaf return */
 102
 103	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
 104	struct tcf_block	*block;
 105
 106	int			level;		/* our level (see above) */
 107	unsigned int		children;
 108	struct htb_class	*parent;	/* parent class */
 109
 110	struct net_rate_estimator __rcu *rate_est;
 111
 112	/*
 113	 * Written often fields
 114	 */
 115	struct gnet_stats_basic_sync bstats;
 116	struct gnet_stats_basic_sync bstats_bias;
 117	struct tc_htb_xstats	xstats;	/* our special stats */
 118
 119	/* token bucket parameters */
 120	s64			tokens, ctokens;/* current number of tokens */
 121	s64			t_c;		/* checkpoint time */
 122
 123	union {
 124		struct htb_class_leaf {
 125			int		deficit[TC_HTB_MAXDEPTH];
 126			struct Qdisc	*q;
 127			struct netdev_queue *offload_queue;
 128		} leaf;
 129		struct htb_class_inner {
 130			struct htb_prio clprio[TC_HTB_NUMPRIO];
 131		} inner;
 132	};
 133	s64			pq_key;
 134
 135	int			prio_activity;	/* for which prios are we active */
 136	enum htb_cmode		cmode;		/* current mode of the class */
 137	struct rb_node		pq_node;	/* node for event queue */
 138	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 139
 140	unsigned int drops ____cacheline_aligned_in_smp;
 141	unsigned int		overlimits;
 142};
 143
 144struct htb_level {
 145	struct rb_root	wait_pq;
 146	struct htb_prio hprio[TC_HTB_NUMPRIO];
 147};
 148
 149struct htb_sched {
 150	struct Qdisc_class_hash clhash;
 151	int			defcls;		/* class where unclassified flows go to */
 152	int			rate2quantum;	/* quant = rate / rate2quantum */
 153
 154	/* filters for qdisc itself */
 155	struct tcf_proto __rcu	*filter_list;
 156	struct tcf_block	*block;
 157
 158#define HTB_WARN_TOOMANYEVENTS	0x1
 159	unsigned int		warned;	/* only one warning */
 160	int			direct_qlen;
 161	struct work_struct	work;
 162
 163	/* non shaped skbs; let them go directly thru */
 164	struct qdisc_skb_head	direct_queue;
 165	u32			direct_pkts;
 166	u32			overlimits;
 167
 168	struct qdisc_watchdog	watchdog;
 169
 170	s64			now;	/* cached dequeue time */
 
 171
 172	/* time of nearest event per level (row) */
 173	s64			near_ev_cache[TC_HTB_MAXDEPTH];
 
 174
 175	int			row_mask[TC_HTB_MAXDEPTH];
 176
 177	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
 178
 179	struct Qdisc		**direct_qdiscs;
 180	unsigned int            num_direct_qdiscs;
 181
 182	bool			offload;
 183};
 184
 185/* find class in global hash table using given handle */
 186static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 187{
 188	struct htb_sched *q = qdisc_priv(sch);
 189	struct Qdisc_class_common *clc;
 190
 191	clc = qdisc_class_find(&q->clhash, handle);
 192	if (clc == NULL)
 193		return NULL;
 194	return container_of(clc, struct htb_class, common);
 195}
 196
 197static unsigned long htb_search(struct Qdisc *sch, u32 handle)
 198{
 199	return (unsigned long)htb_find(handle, sch);
 200}
 201
 202#define HTB_DIRECT ((struct htb_class *)-1L)
 203
 204/**
 205 * htb_classify - classify a packet into class
 206 * @skb: the socket buffer
 207 * @sch: the active queue discipline
 208 * @qerr: pointer for returned status code
 209 *
 210 * It returns NULL if the packet should be dropped or -1 if the packet
 211 * should be passed directly thru. In all other cases leaf class is returned.
 212 * We allow direct class selection by classid in skb->priority. Then we
 213 * examine filters in qdisc and in inner nodes (if higher filter points to
 214 * the inner node). If we end up with classid MAJOR:0 we enqueue the skb into
 215 * the special internal fifo (direct). These packets then go directly thru.
 216 * If we still have no valid leaf we try to use the MAJOR:default leaf. If
 217 * that is still unsuccessful, we finish and return the direct queue.
 218 */
 219static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 220				      int *qerr)
 221{
 222	struct htb_sched *q = qdisc_priv(sch);
 223	struct htb_class *cl;
 224	struct tcf_result res;
 225	struct tcf_proto *tcf;
 226	int result;
 227
 228	/* allow selecting the class by setting skb->priority to a valid classid;
 229	 * note that nfmark can be used too by attaching filter fw with no
 230	 * rules in it
 231	 */
 232	if (skb->priority == sch->handle)
 233		return HTB_DIRECT;	/* X:0 (direct flow) selected */
 234	cl = htb_find(skb->priority, sch);
 235	if (cl) {
 236		if (cl->level == 0)
 237			return cl;
 238		/* Start with inner filter chain if a non-leaf class is selected */
 239		tcf = rcu_dereference_bh(cl->filter_list);
 240	} else {
 241		tcf = rcu_dereference_bh(q->filter_list);
 242	}
 243
 244	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 245	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
 246#ifdef CONFIG_NET_CLS_ACT
 247		switch (result) {
 248		case TC_ACT_QUEUED:
 249		case TC_ACT_STOLEN:
 250		case TC_ACT_TRAP:
 251			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 252			fallthrough;
 253		case TC_ACT_SHOT:
 254			return NULL;
 255		}
 256#endif
 257		cl = (void *)res.class;
 258		if (!cl) {
 259			if (res.classid == sch->handle)
 260				return HTB_DIRECT;	/* X:0 (direct flow) */
 261			cl = htb_find(res.classid, sch);
 262			if (!cl)
 263				break;	/* filter selected invalid classid */
 264		}
 265		if (!cl->level)
 266			return cl;	/* we hit leaf; return it */
 267
 268		/* we have got inner class; apply inner filter chain */
 269		tcf = rcu_dereference_bh(cl->filter_list);
 270	}
 271	/* classification failed; try to use default class */
 272	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
 273	if (!cl || cl->level)
 274		return HTB_DIRECT;	/* bad default .. this is safe bet */
 275	return cl;
 276}
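
/* Illustrative example (not part of the code path; handles are hypothetical):
 * with "tc qdisc add dev eth0 root handle 1: htb default 20",
 *  - skb->priority == 1:0  selects HTB_DIRECT and bypasses shaping,
 *  - skb->priority == 1:10 selects class 1:10 directly if it is a leaf,
 *  - otherwise the qdisc filters (and then any inner-class filters) are
 *    walked, and on failure the default class 1:20 is used, falling back
 *    to HTB_DIRECT if 1:20 is missing or is an inner class.
 */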
 277
 278/**
 279 * htb_add_to_id_tree - adds class to the round robin list
 280 * @root: the root of the tree
 281 * @cl: the class to add
 282 * @prio: the given prio in class
 283 *
 284 * Routine adds class to the list (actually tree) sorted by classid.
 285 * Make sure that class is not already on such list for given prio.
 286 */
 287static void htb_add_to_id_tree(struct rb_root *root,
 288			       struct htb_class *cl, int prio)
 289{
 290	struct rb_node **p = &root->rb_node, *parent = NULL;
 291
 292	while (*p) {
 293		struct htb_class *c;
 294		parent = *p;
 295		c = rb_entry(parent, struct htb_class, node[prio]);
 296
 297		if (cl->common.classid > c->common.classid)
 298			p = &parent->rb_right;
 299		else
 300			p = &parent->rb_left;
 301	}
 302	rb_link_node(&cl->node[prio], parent, p);
 303	rb_insert_color(&cl->node[prio], root);
 304}
 305
 306/**
 307 * htb_add_to_wait_tree - adds class to the event queue with delay
 308 * @q: the priority event queue
 309 * @cl: the class to add
 310 * @delay: delay in nanoseconds
 311 *
 312 * The class is added to the priority event queue to indicate that the class
 313 * will change its mode at time cl->pq_key (in nanoseconds). Make sure that
 314 * the class is not already in the queue.
 315 */
 316static void htb_add_to_wait_tree(struct htb_sched *q,
 317				 struct htb_class *cl, s64 delay)
 318{
 319	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
 320
 321	cl->pq_key = q->now + delay;
 322	if (cl->pq_key == q->now)
 323		cl->pq_key++;
 324
 325	/* update the nearest event cache */
 326	if (q->near_ev_cache[cl->level] > cl->pq_key)
 327		q->near_ev_cache[cl->level] = cl->pq_key;
 328
 329	while (*p) {
 330		struct htb_class *c;
 331		parent = *p;
 332		c = rb_entry(parent, struct htb_class, pq_node);
 333		if (cl->pq_key >= c->pq_key)
 334			p = &parent->rb_right;
 335		else
 336			p = &parent->rb_left;
 337	}
 338	rb_link_node(&cl->pq_node, parent, p);
 339	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 340}
 341
 342/**
 343 * htb_next_rb_node - finds next node in binary tree
 344 * @n: the current node in binary tree
 345 *
 346 * When we are past last key we return NULL.
 347 * Average complexity is 2 steps per call.
 348 */
 349static inline void htb_next_rb_node(struct rb_node **n)
 350{
 351	*n = rb_next(*n);
 352}
 353
 354/**
 355 * htb_add_class_to_row - add class to its row
 356 * @q: the priority event queue
 357 * @cl: the class to add
 358 * @mask: the given priorities in class in bitmap
 359 *
 360 * The class is added to row at priorities marked in mask.
 361 * It does nothing if mask == 0.
 362 */
 363static inline void htb_add_class_to_row(struct htb_sched *q,
 364					struct htb_class *cl, int mask)
 365{
 366	q->row_mask[cl->level] |= mask;
 367	while (mask) {
 368		int prio = ffz(~mask);
 369		mask &= ~(1 << prio);
 370		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
 371	}
 372}
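
/* A note on the bit iteration above: ffz(~mask) yields the index of the
 * lowest set bit of mask. A minimal userspace sketch of the same loop
 * (illustrative only; __builtin_ctz() stands in for ffz(~mask)):
 *
 *	while (mask) {
 *		int prio = __builtin_ctz(mask);
 *		mask &= ~(1 << prio);
 *		// ... handle this prio ...
 *	}
 */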
 373
 374/* If this triggers, it is a bug in this code, but it need not be fatal */
 375static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
 376{
 377	if (RB_EMPTY_NODE(rb)) {
 378		WARN_ON(1);
 379	} else {
 380		rb_erase(rb, root);
 381		RB_CLEAR_NODE(rb);
 382	}
 383}
 384
 385
 386/**
 387 * htb_remove_class_from_row - removes class from its row
 388 * @q: the priority event queue
 389 * @cl: the class to remove
 390 * @mask: the given priorities in class in bitmap
 391 *
 392 * The class is removed from row at priorities marked in mask.
 393 * It does nothing if mask == 0.
 394 */
 395static inline void htb_remove_class_from_row(struct htb_sched *q,
 396						 struct htb_class *cl, int mask)
 397{
 398	int m = 0;
 399	struct htb_level *hlevel = &q->hlevel[cl->level];
 400
 401	while (mask) {
 402		int prio = ffz(~mask);
 403		struct htb_prio *hprio = &hlevel->hprio[prio];
 404
 405		mask &= ~(1 << prio);
 406		if (hprio->ptr == cl->node + prio)
 407			htb_next_rb_node(&hprio->ptr);
 408
 409		htb_safe_rb_erase(cl->node + prio, &hprio->row);
 410		if (!hprio->row.rb_node)
 411			m |= 1 << prio;
 412	}
 413	q->row_mask[cl->level] &= ~m;
 414}
 415
 416/**
 417 * htb_activate_prios - creates active class's feed chain
 418 * @q: the priority event queue
 419 * @cl: the class to activate
 420 *
 421 * The class is connected to ancestors and/or appropriate rows
 422 * for priorities it is participating in. cl->cmode must be the new
 423 * (activated) mode. It does nothing if cl->prio_activity == 0.
 424 */
 425static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 426{
 427	struct htb_class *p = cl->parent;
 428	long m, mask = cl->prio_activity;
 429
 430	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 431		m = mask;
 432		while (m) {
 433			unsigned int prio = ffz(~m);
 434
 435			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
 436				break;
 437			m &= ~(1 << prio);
 438
 439			if (p->inner.clprio[prio].feed.rb_node)
 440				/* parent already has its feed in use, so
 441				 * reset the bit in mask as parent is already ok
 442				 */
 443				mask &= ~(1 << prio);
 444
 445			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
 446		}
 447		p->prio_activity |= mask;
 448		cl = p;
 449		p = cl->parent;
 450
 451	}
 452	if (cl->cmode == HTB_CAN_SEND && mask)
 453		htb_add_class_to_row(q, cl, mask);
 454}
 455
 456/**
 457 * htb_deactivate_prios - remove class from feed chain
 458 * @q: the priority event queue
 459 * @cl: the class to deactivate
 460 *
 461 * cl->cmode must represent old mode (before deactivation). It does
 462 * nothing if cl->prio_activity == 0. Class is removed from all feed
 463 * chains and rows.
 464 */
 465static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 466{
 467	struct htb_class *p = cl->parent;
 468	long m, mask = cl->prio_activity;
 469
 470	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 471		m = mask;
 472		mask = 0;
 473		while (m) {
 474			int prio = ffz(~m);
 475			m &= ~(1 << prio);
 476
 477			if (p->inner.clprio[prio].ptr == cl->node + prio) {
 478				/* we are removing a child which is pointed to from
 479				 * the parent feed - forget the pointer but remember
 480				 * classid
 481				 */
 482				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
 483				p->inner.clprio[prio].ptr = NULL;
 484			}
 485
 486			htb_safe_rb_erase(cl->node + prio,
 487					  &p->inner.clprio[prio].feed);
 488
 489			if (!p->inner.clprio[prio].feed.rb_node)
 490				mask |= 1 << prio;
 491		}
 492
 493		p->prio_activity &= ~mask;
 494		cl = p;
 495		p = cl->parent;
 496
 497	}
 498	if (cl->cmode == HTB_CAN_SEND && mask)
 499		htb_remove_class_from_row(q, cl, mask);
 500}
 501
 502static inline s64 htb_lowater(const struct htb_class *cl)
 503{
 504	if (htb_hysteresis)
 505		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 506	else
 507		return 0;
 508}
 509static inline s64 htb_hiwater(const struct htb_class *cl)
 510{
 511	if (htb_hysteresis)
 512		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
 513	else
 514		return 0;
 515}
 516
 517
 518/**
 519 * htb_class_mode - computes and returns current class mode
 520 * @cl: the target class
 521 * @diff: diff time in nanoseconds
 522 *
 523 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 524 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 525 * from now to time when cl will change its state.
 526 * Also it is worth noting that class mode doesn't change simply
 527 * at cl->{c,}tokens == 0; rather there can be hysteresis over the
 528 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 529 * mode transitions per time unit. The speed gain is about 1/6.
 530 */
 531static inline enum htb_cmode
 532htb_class_mode(struct htb_class *cl, s64 *diff)
 533{
 534	s64 toks;
 535
 536	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 537		*diff = -toks;
 538		return HTB_CANT_SEND;
 539	}
 540
 541	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
 542		return HTB_CAN_SEND;
 543
 544	*diff = -toks;
 545	return HTB_MAY_BORROW;
 546}
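
/* A minimal standalone sketch of the decision above, with hysteresis spelled
 * out (illustrative only; all names are local to this comment):
 *
 *	s64 lo = hysteresis && cmode != HTB_CANT_SEND ? -cbuffer : 0;
 *	s64 hi = hysteresis && cmode == HTB_CAN_SEND  ? -buffer  : 0;
 *	if (ctokens + diff < lo)
 *		new_mode = HTB_CANT_SEND;	// over ceil
 *	else if (tokens + diff >= hi)
 *		new_mode = HTB_CAN_SEND;	// within rate
 *	else
 *		new_mode = HTB_MAY_BORROW;	// over rate but under ceil
 *
 * With hysteresis on, a class that is already sending keeps CAN_SEND until
 * its tokens fall below -buffer instead of below 0, trading a little shaping
 * accuracy for fewer mode transitions.
 */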
 547
 548/**
 549 * htb_change_class_mode - changes class's mode
 550 * @q: the priority event queue
 551 * @cl: the target class
 552 * @diff: diff time in nanoseconds
 553 *
 554 * This should be the only way to change a class's mode under normal
 555 * circumstances. The routine will update feed list linkage, change mode
 556 * and add class to the wait event queue if appropriate. New mode should
 557 * be different from old one and cl->pq_key has to be valid if changing
 558 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 559 */
 560static void
 561htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
 562{
 563	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 564
 565	if (new_mode == cl->cmode)
 566		return;
 567
 568	if (new_mode == HTB_CANT_SEND) {
 569		cl->overlimits++;
 570		q->overlimits++;
 571	}
 572
 573	if (cl->prio_activity) {	/* not necessary: speed optimization */
 574		if (cl->cmode != HTB_CANT_SEND)
 575			htb_deactivate_prios(q, cl);
 576		cl->cmode = new_mode;
 577		if (new_mode != HTB_CANT_SEND)
 578			htb_activate_prios(q, cl);
 579	} else
 580		cl->cmode = new_mode;
 581}
 582
 583/**
 584 * htb_activate - inserts leaf cl into appropriate active feeds
 585 * @q: the priority event queue
 586 * @cl: the target class
 587 *
 588 * Routine learns the (new) priority of the leaf and activates the feed
 589 * chain for that prio. It can safely be called on an already active leaf.
 591 */
 592static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 593{
 594	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
 595
 596	if (!cl->prio_activity) {
 597		cl->prio_activity = 1 << cl->prio;
 598		htb_activate_prios(q, cl);
 599	}
 600}
 601
 602/**
 603 * htb_deactivate - remove leaf cl from active feeds
 604 * @q: the priority event queue
 605 * @cl: the target class
 606 *
 607 * Make sure that the leaf is active. In other words it can't be called
 608 * with a non-active leaf.
 609 */
 610static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 611{
 612	WARN_ON(!cl->prio_activity);
 613
 614	htb_deactivate_prios(q, cl);
 615	cl->prio_activity = 0;
 616}
 617
 618static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 619		       struct sk_buff **to_free)
 620{
 621	int ret;
 622	unsigned int len = qdisc_pkt_len(skb);
 623	struct htb_sched *q = qdisc_priv(sch);
 624	struct htb_class *cl = htb_classify(skb, sch, &ret);
 625
 626	if (cl == HTB_DIRECT) {
 627		/* enqueue to helper queue */
 628		if (q->direct_queue.qlen < q->direct_qlen) {
 629			__qdisc_enqueue_tail(skb, &q->direct_queue);
 630			q->direct_pkts++;
 631		} else {
 632			return qdisc_drop(skb, sch, to_free);
 633		}
 634#ifdef CONFIG_NET_CLS_ACT
 635	} else if (!cl) {
 636		if (ret & __NET_XMIT_BYPASS)
 637			qdisc_qstats_drop(sch);
 638		__qdisc_drop(skb, to_free);
 639		return ret;
 640#endif
 641	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
 642					to_free)) != NET_XMIT_SUCCESS) {
 643		if (net_xmit_drop_count(ret)) {
 644			qdisc_qstats_drop(sch);
 645			cl->drops++;
 646		}
 647		return ret;
 648	} else {
 649		htb_activate(q, cl);
 650	}
 651
 652	sch->qstats.backlog += len;
 653	sch->q.qlen++;
 654	return NET_XMIT_SUCCESS;
 655}
 656
 657static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 658{
 659	s64 toks = diff + cl->tokens;
 660
 661	if (toks > cl->buffer)
 662		toks = cl->buffer;
 663	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
 664	if (toks <= -cl->mbuffer)
 665		toks = 1 - cl->mbuffer;
 666
 667	cl->tokens = toks;
 668}
 669
 670static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 671{
 672	s64 toks = diff + cl->ctokens;
 673
 674	if (toks > cl->cbuffer)
 675		toks = cl->cbuffer;
 676	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
 677	if (toks <= -cl->mbuffer)
 678		toks = 1 - cl->mbuffer;
 679
 680	cl->ctokens = toks;
 681}
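
/* Both helpers above perform the same token-bucket update; a hedged sketch of
 * the arithmetic (names such as elapsed_ns and bytes_to_ns() are illustrative;
 * in the code psched_l2t_ns() converts a byte count into transmit time at the
 * configured rate):
 *
 *	s64 toks = min_t(s64, tokens + elapsed_ns, buffer_ns);
 *	toks -= bytes_to_ns(rate, bytes);		// cost of this packet
 *	tokens = max_t(s64, toks, 1 - mbuffer_ns);	// bound the debt
 *
 * So tokens tracks accumulated idle time minus transmit time, capped above by
 * the burst buffer and below by roughly -mbuffer.
 */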
 682
 683/**
 684 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 685 * @q: the priority event queue
 686 * @cl: the class to start iterating from
 687 * @level: the minimum level to account
 688 * @skb: the socket buffer
 689 *
 690 * Routine assumes that a packet "bytes" long was dequeued from leaf cl
 691 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 692 * leaf and all ancestors and to rate bucket for ancestors at levels
 693 * "level" and higher. It also handles possible change of mode resulting
 694 * from the update. Note that mode can also increase here (MAY_BORROW to
 695 * CAN_SEND) because we can use a more precise clock than the event queue here.
 696 * In such case we remove class from event queue first.
 697 */
 698static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 699			     int level, struct sk_buff *skb)
 700{
 701	int bytes = qdisc_pkt_len(skb);
 702	enum htb_cmode old_mode;
 703	s64 diff;
 704
 705	while (cl) {
 706		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 707		if (cl->level >= level) {
 708			if (cl->level == level)
 709				cl->xstats.lends++;
 710			htb_accnt_tokens(cl, bytes, diff);
 711		} else {
 712			cl->xstats.borrows++;
 713			cl->tokens += diff;	/* we moved t_c; update tokens */
 714		}
 715		htb_accnt_ctokens(cl, bytes, diff);
 716		cl->t_c = q->now;
 717
 718		old_mode = cl->cmode;
 719		diff = 0;
 720		htb_change_class_mode(q, cl, &diff);
 721		if (old_mode != cl->cmode) {
 722			if (old_mode != HTB_CAN_SEND)
 723				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 724			if (cl->cmode != HTB_CAN_SEND)
 725				htb_add_to_wait_tree(q, cl, diff);
 726		}
 727
 728		/* update basic stats except for leaves which are already updated */
 729		if (cl->level)
 730			bstats_update(&cl->bstats, skb);
 731
 732		cl = cl->parent;
 733	}
 734}
 735
 736/**
 737 * htb_do_events - make mode changes to classes at the level
 738 * @q: the priority event queue
 739 * @level: which wait_pq in 'q->hlevel'
 740 * @start: start jiffies
 741 *
 742 * Scans event queue for pending events and applies them. Returns time of
 743 * next pending event (0 for no event in pq, q->now for too many events).
 744 * Note: Applied are events whose cl->pq_key <= q->now.
 745 */
 746static s64 htb_do_events(struct htb_sched *q, const int level,
 747			 unsigned long start)
 748{
 749	/* don't run for longer than 2 jiffies; 2 is used instead of
 750	 * 1 to simplify things when jiffy is going to be incremented
 751	 * too soon
 752	 */
 753	unsigned long stop_at = start + 2;
 754	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
 755
 756	while (time_before(jiffies, stop_at)) {
 757		struct htb_class *cl;
 758		s64 diff;
 759		struct rb_node *p = rb_first(wait_pq);
 760
 761		if (!p)
 762			return 0;
 763
 764		cl = rb_entry(p, struct htb_class, pq_node);
 765		if (cl->pq_key > q->now)
 766			return cl->pq_key;
 767
 768		htb_safe_rb_erase(p, wait_pq);
 769		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 770		htb_change_class_mode(q, cl, &diff);
 771		if (cl->cmode != HTB_CAN_SEND)
 772			htb_add_to_wait_tree(q, cl, diff);
 773	}
 774
 775	/* too much load - let's continue after a break for scheduling */
 776	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
 777		pr_warn("htb: too many events!\n");
 778		q->warned |= HTB_WARN_TOOMANYEVENTS;
 779	}
 780
 781	return q->now;
 782}
 783
 784/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
 785 * if no such one exists.
 786 */
 787static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 788					      u32 id)
 789{
 790	struct rb_node *r = NULL;
 791	while (n) {
 792		struct htb_class *cl =
 793		    rb_entry(n, struct htb_class, node[prio]);
 794
 795		if (id > cl->common.classid) {
 796			n = n->rb_right;
 797		} else if (id < cl->common.classid) {
 798			r = n;
 799			n = n->rb_left;
 800		} else {
 801			return n;
 802		}
 803	}
 804	return r;
 805}
 806
 807/**
 808 * htb_lookup_leaf - returns next leaf class in DRR order
 809 * @hprio: the current one
 810 * @prio: which prio in class
 811 *
 812 * Find the leaf that the current feed pointer points to.
 813 */
 814static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 815{
 816	int i;
 817	struct {
 818		struct rb_node *root;
 819		struct rb_node **pptr;
 820		u32 *pid;
 821	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 822
 823	BUG_ON(!hprio->row.rb_node);
 824	sp->root = hprio->row.rb_node;
 825	sp->pptr = &hprio->ptr;
 826	sp->pid = &hprio->last_ptr_id;
 827
 828	for (i = 0; i < 65535; i++) {
 829		if (!*sp->pptr && *sp->pid) {
 830			/* ptr was invalidated but id is valid - try to recover
 831			 * the original or next ptr
 832			 */
 833			*sp->pptr =
 834			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 835		}
 836		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
 837				 * can become out of date quickly
 838				 */
 839		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 840			*sp->pptr = sp->root;
 841			while ((*sp->pptr)->rb_left)
 842				*sp->pptr = (*sp->pptr)->rb_left;
 843			if (sp > stk) {
 844				sp--;
 845				if (!*sp->pptr) {
 846					WARN_ON(1);
 847					return NULL;
 848				}
 849				htb_next_rb_node(sp->pptr);
 850			}
 851		} else {
 852			struct htb_class *cl;
 853			struct htb_prio *clp;
 854
 855			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 856			if (!cl->level)
 857				return cl;
 858			clp = &cl->inner.clprio[prio];
 859			(++sp)->root = clp->feed.rb_node;
 860			sp->pptr = &clp->ptr;
 861			sp->pid = &clp->last_ptr_id;
 862		}
 863	}
 864	WARN_ON(1);
 865	return NULL;
 866}
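
/* Illustrative walk (hypothetical tree): suppose the prio-0 row holds inner
 * class 1:1 whose feed contains leaves 1:10 and 1:20. The traversal starts at
 * the row, pushes 1:1's feed onto the stack, follows (or recovers via
 * last_ptr_id) the saved ptr, and returns the leaf it lands on. Because the
 * dequeue path advances the feed pointers, successive calls round-robin
 * 1:10, 1:20, 1:10, ...
 */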
 867
 868/* dequeues packet at given priority and level; call only if
 869 * you are sure that there is active class at prio/level
 870 */
 871static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
 872					const int level)
 873{
 874	struct sk_buff *skb = NULL;
 875	struct htb_class *cl, *start;
 876	struct htb_level *hlevel = &q->hlevel[level];
 877	struct htb_prio *hprio = &hlevel->hprio[prio];
 878
 879	/* look initial class up in the row */
 880	start = cl = htb_lookup_leaf(hprio, prio);
 881
 882	do {
 883next:
 884		if (unlikely(!cl))
 885			return NULL;
 886
 887		/* class can be empty - it is unlikely but can be true if leaf
 888		 * qdisc drops packets in enqueue routine or if someone used
 889		 * graft operation on the leaf since last dequeue;
 890		 * simply deactivate and skip such class
 891		 */
 892		if (unlikely(cl->leaf.q->q.qlen == 0)) {
 893			struct htb_class *next;
 894			htb_deactivate(q, cl);
 895
 896			/* row/level might become empty */
 897			if ((q->row_mask[level] & (1 << prio)) == 0)
 898				return NULL;
 899
 900			next = htb_lookup_leaf(hprio, prio);
 901
 902			if (cl == start)	/* fix start if we just deleted it */
 903				start = next;
 904			cl = next;
 905			goto next;
 906		}
 907
 908		skb = cl->leaf.q->dequeue(cl->leaf.q);
 909		if (likely(skb != NULL))
 910			break;
 911
 912		qdisc_warn_nonwc("htb", cl->leaf.q);
 913		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
 914					 &q->hlevel[0].hprio[prio].ptr);
 915		cl = htb_lookup_leaf(hprio, prio);
 916
 917	} while (cl != start);
 918
 919	if (likely(skb != NULL)) {
 920		bstats_update(&cl->bstats, skb);
 921		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
 922		if (cl->leaf.deficit[level] < 0) {
 923			cl->leaf.deficit[level] += cl->quantum;
 924			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
 925						 &q->hlevel[0].hprio[prio].ptr);
 926		}
 927		/* this used to be after charge_class but this constellation
 928		 * gives us slightly better performance
 929		 */
 930		if (!cl->leaf.q->q.qlen)
 931			htb_deactivate(q, cl);
 932		htb_charge_class(q, cl, level, skb);
 933	}
 934	return skb;
 935}
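
/* Deficit round robin, with illustrative numbers: given quantum = 1500 and
 * deficit[level] = 200, dequeuing a 1400-byte packet leaves deficit = -1200,
 * so the quantum is added (deficit = 300) and the feed pointer advances to
 * the next leaf; a later 100-byte packet would leave deficit = 200 and keep
 * the pointer on the same leaf.
 */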
 936
 937static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 938{
 939	struct sk_buff *skb;
 940	struct htb_sched *q = qdisc_priv(sch);
 941	int level;
 942	s64 next_event;
 943	unsigned long start_at;
 944
 945	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 946	skb = __qdisc_dequeue_head(&q->direct_queue);
 947	if (skb != NULL) {
 948ok:
 949		qdisc_bstats_update(sch, skb);
 950		qdisc_qstats_backlog_dec(sch, skb);
 951		sch->q.qlen--;
 952		return skb;
 953	}
 954
 955	if (!sch->q.qlen)
 956		goto fin;
 957	q->now = ktime_get_ns();
 958	start_at = jiffies;
 959
 960	next_event = q->now + 5LLU * NSEC_PER_SEC;
 961
 962	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 963		/* common case optimization - skip event handler quickly */
 964		int m;
 965		s64 event = q->near_ev_cache[level];
 966
 967		if (q->now >= event) {
 968			event = htb_do_events(q, level, start_at);
 969			if (!event)
 970				event = q->now + NSEC_PER_SEC;
 971			q->near_ev_cache[level] = event;
 972		}
 973
 974		if (next_event > event)
 975			next_event = event;
 976
 977		m = ~q->row_mask[level];
 978		while (m != (int)(-1)) {
 979			int prio = ffz(m);
 980
 981			m |= 1 << prio;
 982			skb = htb_dequeue_tree(q, prio, level);
 983			if (likely(skb != NULL))
 984				goto ok;
 985		}
 986	}
 987	if (likely(next_event > q->now))
 988		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
 989	else
 990		schedule_work(&q->work);
 991fin:
 992	return skb;
 993}
 994
 995/* reset all classes */
 996/* always called under BH & queue lock */
 997static void htb_reset(struct Qdisc *sch)
 998{
 999	struct htb_sched *q = qdisc_priv(sch);
1000	struct htb_class *cl;
1001	unsigned int i;
1002
1003	for (i = 0; i < q->clhash.hashsize; i++) {
1004		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1005			if (cl->level)
1006				memset(&cl->inner, 0, sizeof(cl->inner));
1007			else {
1008				if (cl->leaf.q && !q->offload)
1009					qdisc_reset(cl->leaf.q);
1010			}
1011			cl->prio_activity = 0;
1012			cl->cmode = HTB_CAN_SEND;
1013		}
1014	}
1015	qdisc_watchdog_cancel(&q->watchdog);
1016	__qdisc_reset_queue(&q->direct_queue);
1017	memset(q->hlevel, 0, sizeof(q->hlevel));
1018	memset(q->row_mask, 0, sizeof(q->row_mask));
1019}
1020
1021static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1022	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
1023	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
1024	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1025	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1026	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
1027	[TCA_HTB_RATE64] = { .type = NLA_U64 },
1028	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
1029	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
1030};
1031
1032static void htb_work_func(struct work_struct *work)
1033{
1034	struct htb_sched *q = container_of(work, struct htb_sched, work);
1035	struct Qdisc *sch = q->watchdog.qdisc;
1036
1037	rcu_read_lock();
1038	__netif_schedule(qdisc_root(sch));
1039	rcu_read_unlock();
1040}
1041
1042static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
1043{
1044	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
1045}
1046
1047static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1048		    struct netlink_ext_ack *extack)
1049{
1050	struct net_device *dev = qdisc_dev(sch);
1051	struct tc_htb_qopt_offload offload_opt;
1052	struct htb_sched *q = qdisc_priv(sch);
1053	struct nlattr *tb[TCA_HTB_MAX + 1];
1054	struct tc_htb_glob *gopt;
1055	unsigned int ntx;
1056	bool offload;
1057	int err;
1058
1059	qdisc_watchdog_init(&q->watchdog, sch);
1060	INIT_WORK(&q->work, htb_work_func);
1061
1062	if (!opt)
1063		return -EINVAL;
1064
1065	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1066	if (err)
1067		return err;
1068
1069	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1070					  NULL);
1071	if (err < 0)
1072		return err;
1073
1074	if (!tb[TCA_HTB_INIT])
1075		return -EINVAL;
1076
1077	gopt = nla_data(tb[TCA_HTB_INIT]);
1078	if (gopt->version != HTB_VER >> 16)
1079		return -EINVAL;
1080
1081	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
1082
1083	if (offload) {
1084		if (sch->parent != TC_H_ROOT) {
1085			NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
1086			return -EOPNOTSUPP;
1087		}
1088
1089		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
1090			NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
1091			return -EOPNOTSUPP;
1092		}
1093
1094		q->num_direct_qdiscs = dev->real_num_tx_queues;
1095		q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
1096					   sizeof(*q->direct_qdiscs),
1097					   GFP_KERNEL);
1098		if (!q->direct_qdiscs)
1099			return -ENOMEM;
1100	}
1101
1102	err = qdisc_class_hash_init(&q->clhash);
1103	if (err < 0)
1104		return err;
1105
1106	if (tb[TCA_HTB_DIRECT_QLEN])
1107		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1108	else
1109		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1110
1111	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1112		q->rate2quantum = 1;
1113	q->defcls = gopt->defcls;
1114
1115	if (!offload)
1116		return 0;
1117
1118	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1119		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1120		struct Qdisc *qdisc;
1121
1122		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1123					  TC_H_MAKE(sch->handle, 0), extack);
1124		if (!qdisc) {
1125			return -ENOMEM;
1126		}
1127
1128		q->direct_qdiscs[ntx] = qdisc;
1129		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1130	}
1131
1132	sch->flags |= TCQ_F_MQROOT;
1133
1134	offload_opt = (struct tc_htb_qopt_offload) {
1135		.command = TC_HTB_CREATE,
1136		.parent_classid = TC_H_MAJ(sch->handle) >> 16,
1137		.classid = TC_H_MIN(q->defcls),
1138		.extack = extack,
1139	};
1140	err = htb_offload(dev, &offload_opt);
1141	if (err)
1142		return err;
1143
1144	/* Defer this assignment, so that htb_destroy skips offload-related
1145	 * parts (especially calling ndo_setup_tc) on errors.
1146	 */
1147	q->offload = true;
1148
1149	return 0;
1150}
1151
1152static void htb_attach_offload(struct Qdisc *sch)
1153{
1154	struct net_device *dev = qdisc_dev(sch);
1155	struct htb_sched *q = qdisc_priv(sch);
1156	unsigned int ntx;
1157
1158	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1159		struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1160
1161		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1162		qdisc_put(old);
1163		qdisc_hash_add(qdisc, false);
1164	}
1165	for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1166		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1167		struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1168
1169		qdisc_put(old);
1170	}
1171
1172	kfree(q->direct_qdiscs);
1173	q->direct_qdiscs = NULL;
1174}
1175
1176static void htb_attach_software(struct Qdisc *sch)
1177{
1178	struct net_device *dev = qdisc_dev(sch);
1179	unsigned int ntx;
1180
1181	/* Resemble qdisc_graft behavior. */
1182	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1183		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1184		struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1185
1186		qdisc_refcount_inc(sch);
1187
1188		qdisc_put(old);
1189	}
1190}
1191
1192static void htb_attach(struct Qdisc *sch)
1193{
1194	struct htb_sched *q = qdisc_priv(sch);
1195
1196	if (q->offload)
1197		htb_attach_offload(sch);
1198	else
1199		htb_attach_software(sch);
1200}
1201
1202static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1203{
1204	struct htb_sched *q = qdisc_priv(sch);
1205	struct nlattr *nest;
1206	struct tc_htb_glob gopt;
1207
1208	if (q->offload)
1209		sch->flags |= TCQ_F_OFFLOADED;
1210	else
1211		sch->flags &= ~TCQ_F_OFFLOADED;
1212
1213	sch->qstats.overlimits = q->overlimits;
1214	/* It's safe to not acquire the qdisc lock. As we hold RTNL,
1215	 * no change can happen on the qdisc parameters.
1216	 */
1217
1218	gopt.direct_pkts = q->direct_pkts;
1219	gopt.version = HTB_VER;
1220	gopt.rate2quantum = q->rate2quantum;
1221	gopt.defcls = q->defcls;
1222	gopt.debug = 0;
1223
1224	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1225	if (nest == NULL)
1226		goto nla_put_failure;
1227	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1228	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1229		goto nla_put_failure;
1230	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1231		goto nla_put_failure;
1232
1233	return nla_nest_end(skb, nest);
1234
1235nla_put_failure:
1236	nla_nest_cancel(skb, nest);
1237	return -1;
1238}
1239
1240static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1241			  struct sk_buff *skb, struct tcmsg *tcm)
1242{
1243	struct htb_class *cl = (struct htb_class *)arg;
1244	struct htb_sched *q = qdisc_priv(sch);
1245	struct nlattr *nest;
1246	struct tc_htb_opt opt;
1247
1248	/* It's safe to not acquire the qdisc lock. As we hold RTNL,
1249	 * no change can happen on the class parameters.
1250	 */
1251	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1252	tcm->tcm_handle = cl->common.classid;
1253	if (!cl->level && cl->leaf.q)
1254		tcm->tcm_info = cl->leaf.q->handle;
1255
1256	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1257	if (nest == NULL)
1258		goto nla_put_failure;
1259
1260	memset(&opt, 0, sizeof(opt));
1261
1262	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1263	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1264	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1265	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1266	opt.quantum = cl->quantum;
1267	opt.prio = cl->prio;
1268	opt.level = cl->level;
1269	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1270		goto nla_put_failure;
1271	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1272		goto nla_put_failure;
1273	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1274	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1275			      TCA_HTB_PAD))
1276		goto nla_put_failure;
1277	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1278	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1279			      TCA_HTB_PAD))
1280		goto nla_put_failure;
1281
1282	return nla_nest_end(skb, nest);
1283
1284nla_put_failure:
1285	nla_nest_cancel(skb, nest);
1286	return -1;
1287}
1288
1289static void htb_offload_aggregate_stats(struct htb_sched *q,
1290					struct htb_class *cl)
1291{
1292	u64 bytes = 0, packets = 0;
1293	struct htb_class *c;
1294	unsigned int i;
1295
1296	gnet_stats_basic_sync_init(&cl->bstats);
1297
1298	for (i = 0; i < q->clhash.hashsize; i++) {
1299		hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1300			struct htb_class *p = c;
1301
1302			while (p && p->level < cl->level)
1303				p = p->parent;
1304
1305			if (p != cl)
1306				continue;
1307
1308			bytes += u64_stats_read(&c->bstats_bias.bytes);
1309			packets += u64_stats_read(&c->bstats_bias.packets);
1310			if (c->level == 0) {
1311				bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
1312				packets += u64_stats_read(&c->leaf.q->bstats.packets);
1313			}
1314		}
1315	}
1316	_bstats_update(&cl->bstats, bytes, packets);
1317}
1318
1319static int
1320htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1321{
1322	struct htb_class *cl = (struct htb_class *)arg;
1323	struct htb_sched *q = qdisc_priv(sch);
1324	struct gnet_stats_queue qs = {
1325		.drops = cl->drops,
1326		.overlimits = cl->overlimits,
1327	};
1328	__u32 qlen = 0;
1329
1330	if (!cl->level && cl->leaf.q)
1331		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1332
1333	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1334				    INT_MIN, INT_MAX);
1335	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1336				     INT_MIN, INT_MAX);
1337
1338	if (q->offload) {
1339		if (!cl->level) {
1340			if (cl->leaf.q)
1341				cl->bstats = cl->leaf.q->bstats;
1342			else
1343				gnet_stats_basic_sync_init(&cl->bstats);
1344			_bstats_update(&cl->bstats,
1345				       u64_stats_read(&cl->bstats_bias.bytes),
1346				       u64_stats_read(&cl->bstats_bias.packets));
1347		} else {
1348			htb_offload_aggregate_stats(q, cl);
1349		}
1350	}
1351
1352	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
1353	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1354	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1355		return -1;
1356
1357	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1358}
1359
1360static struct netdev_queue *
1361htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1362{
1363	struct net_device *dev = qdisc_dev(sch);
1364	struct tc_htb_qopt_offload offload_opt;
1365	struct htb_sched *q = qdisc_priv(sch);
1366	int err;
1367
1368	if (!q->offload)
1369		return sch->dev_queue;
1370
1371	offload_opt = (struct tc_htb_qopt_offload) {
1372		.command = TC_HTB_LEAF_QUERY_QUEUE,
1373		.classid = TC_H_MIN(tcm->tcm_parent),
1374	};
1375	err = htb_offload(dev, &offload_opt);
1376	if (err || offload_opt.qid >= dev->num_tx_queues)
1377		return NULL;
1378	return netdev_get_tx_queue(dev, offload_opt.qid);
1379}
1380
1381static struct Qdisc *
1382htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1383{
1384	struct net_device *dev = dev_queue->dev;
1385	struct Qdisc *old_q;
1386
1387	if (dev->flags & IFF_UP)
1388		dev_deactivate(dev);
1389	old_q = dev_graft_qdisc(dev_queue, new_q);
1390	if (new_q)
1391		new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1392	if (dev->flags & IFF_UP)
1393		dev_activate(dev);
1394
1395	return old_q;
1396}
1397
1398static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
1399{
1400	struct netdev_queue *queue;
1401
1402	queue = cl->leaf.offload_queue;
1403	if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
1404		WARN_ON(cl->leaf.q->dev_queue != queue);
1405
1406	return queue;
1407}
1408
1409static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
1410				   struct htb_class *cl_new, bool destroying)
1411{
1412	struct netdev_queue *queue_old, *queue_new;
1413	struct net_device *dev = qdisc_dev(sch);
1414
1415	queue_old = htb_offload_get_queue(cl_old);
1416	queue_new = htb_offload_get_queue(cl_new);
1417
1418	if (!destroying) {
1419		struct Qdisc *qdisc;
1420
1421		if (dev->flags & IFF_UP)
1422			dev_deactivate(dev);
1423		qdisc = dev_graft_qdisc(queue_old, NULL);
1424		WARN_ON(qdisc != cl_old->leaf.q);
1425	}
1426
1427	if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
1428		cl_old->leaf.q->dev_queue = queue_new;
1429	cl_old->leaf.offload_queue = queue_new;
1430
1431	if (!destroying) {
1432		struct Qdisc *qdisc;
1433
1434		qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
1435		if (dev->flags & IFF_UP)
1436			dev_activate(dev);
1437		WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1438	}
1439}
1440
1441static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1442		     struct Qdisc **old, struct netlink_ext_ack *extack)
1443{
1444	struct netdev_queue *dev_queue = sch->dev_queue;
1445	struct htb_class *cl = (struct htb_class *)arg;
1446	struct htb_sched *q = qdisc_priv(sch);
1447	struct Qdisc *old_q;
1448
1449	if (cl->level)
1450		return -EINVAL;
1451
1452	if (q->offload)
1453		dev_queue = htb_offload_get_queue(cl);
1454
1455	if (!new) {
1456		new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1457					cl->common.classid, extack);
1458		if (!new)
1459			return -ENOBUFS;
1460	}
1461
1462	if (q->offload) {
1463		/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1464		qdisc_refcount_inc(new);
1465		old_q = htb_graft_helper(dev_queue, new);
1466	}
1467
1468	*old = qdisc_replace(sch, new, &cl->leaf.q);
1469
1470	if (q->offload) {
1471		WARN_ON(old_q != *old);
1472		qdisc_put(old_q);
1473	}
1474
1475	return 0;
1476}
1477
1478static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1479{
1480	struct htb_class *cl = (struct htb_class *)arg;
1481	return !cl->level ? cl->leaf.q : NULL;
1482}
1483
1484static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1485{
1486	struct htb_class *cl = (struct htb_class *)arg;
1487
1488	htb_deactivate(qdisc_priv(sch), cl);
1489}
1490
1491static inline int htb_parent_last_child(struct htb_class *cl)
1492{
1493	if (!cl->parent)
1494		/* the root class */
1495		return 0;
1496	if (cl->parent->children > 1)
1497		/* not the last child */
1498		return 0;
1499	return 1;
1500}
1501
1502static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
1503			       struct Qdisc *new_q)
1504{
1505	struct htb_sched *q = qdisc_priv(sch);
1506	struct htb_class *parent = cl->parent;
1507
1508	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1509
1510	if (parent->cmode != HTB_CAN_SEND)
1511		htb_safe_rb_erase(&parent->pq_node,
1512				  &q->hlevel[parent->level].wait_pq);
1513
1514	parent->level = 0;
1515	memset(&parent->inner, 0, sizeof(parent->inner));
1516	parent->leaf.q = new_q ? new_q : &noop_qdisc;
1517	parent->tokens = parent->buffer;
1518	parent->ctokens = parent->cbuffer;
1519	parent->t_c = ktime_get_ns();
1520	parent->cmode = HTB_CAN_SEND;
1521	if (q->offload)
1522		parent->leaf.offload_queue = cl->leaf.offload_queue;
1523}
1524
1525static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1526				       struct netdev_queue *dev_queue,
1527				       struct Qdisc *new_q)
1528{
1529	struct Qdisc *old_q;
1530
1531	/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1532	if (new_q)
1533		qdisc_refcount_inc(new_q);
1534	old_q = htb_graft_helper(dev_queue, new_q);
1535	WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1536}
1537
1538static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1539				     bool last_child, bool destroying,
1540				     struct netlink_ext_ack *extack)
1541{
1542	struct tc_htb_qopt_offload offload_opt;
1543	struct netdev_queue *dev_queue;
1544	struct Qdisc *q = cl->leaf.q;
1545	struct Qdisc *old;
1546	int err;
1547
1548	if (cl->level)
1549		return -EINVAL;
1550
1551	WARN_ON(!q);
1552	dev_queue = htb_offload_get_queue(cl);
1553	/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
1554	 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
1555	 * does not need to graft or qdisc_put the qdisc being destroyed.
1556	 */
1557	if (!destroying) {
1558		old = htb_graft_helper(dev_queue, NULL);
1559		/* Last qdisc grafted should be the same as cl->leaf.q when
1560		 * calling htb_delete.
1561		 */
1562		WARN_ON(old != q);
1563	}
1564
1565	if (cl->parent) {
1566		_bstats_update(&cl->parent->bstats_bias,
1567			       u64_stats_read(&q->bstats.bytes),
1568			       u64_stats_read(&q->bstats.packets));
1569	}
1570
1571	offload_opt = (struct tc_htb_qopt_offload) {
1572		.command = !last_child ? TC_HTB_LEAF_DEL :
1573			   destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1574			   TC_HTB_LEAF_DEL_LAST,
1575		.classid = cl->common.classid,
1576		.extack = extack,
1577	};
1578	err = htb_offload(qdisc_dev(sch), &offload_opt);
1579
1580	if (!destroying) {
1581		if (!err)
1582			qdisc_put(old);
1583		else
1584			htb_graft_helper(dev_queue, old);
1585	}
1586
1587	if (last_child)
1588		return err;
1589
1590	if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
1591		u32 classid = TC_H_MAJ(sch->handle) |
1592			      TC_H_MIN(offload_opt.classid);
1593		struct htb_class *moved_cl = htb_find(classid, sch);
1594
1595		htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
1596	}
1597
1598	return err;
1599}
1600
1601static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1602{
1603	if (!cl->level) {
1604		WARN_ON(!cl->leaf.q);
1605		qdisc_put(cl->leaf.q);
1606	}
1607	gen_kill_estimator(&cl->rate_est);
1608	tcf_block_put(cl->block);
1609	kfree(cl);
1610}
1611
1612static void htb_destroy(struct Qdisc *sch)
1613{
1614	struct net_device *dev = qdisc_dev(sch);
1615	struct tc_htb_qopt_offload offload_opt;
1616	struct htb_sched *q = qdisc_priv(sch);
1617	struct hlist_node *next;
1618	bool nonempty, changed;
1619	struct htb_class *cl;
1620	unsigned int i;
1621
1622	cancel_work_sync(&q->work);
1623	qdisc_watchdog_cancel(&q->watchdog);
1624	/* This line used to be after htb_destroy_class call below
1625	 * and surprisingly it worked in 2.4. But it must precede it
1626	 * because filters need their target class alive to be able to call
1627	 * unbind_filter on it (without Oops).
1628	 */
1629	tcf_block_put(q->block);
1630
1631	for (i = 0; i < q->clhash.hashsize; i++) {
1632		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1633			tcf_block_put(cl->block);
1634			cl->block = NULL;
1635		}
1636	}
1637
1638	do {
1639		nonempty = false;
1640		changed = false;
1641		for (i = 0; i < q->clhash.hashsize; i++) {
1642			hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1643						  common.hnode) {
1644				bool last_child;
1645
1646				if (!q->offload) {
1647					htb_destroy_class(sch, cl);
1648					continue;
1649				}
1650
1651				nonempty = true;
1652
1653				if (cl->level)
1654					continue;
1655
1656				changed = true;
1657
1658				last_child = htb_parent_last_child(cl);
1659				htb_destroy_class_offload(sch, cl, last_child,
1660							  true, NULL);
1661				qdisc_class_hash_remove(&q->clhash,
1662							&cl->common);
1663				if (cl->parent)
1664					cl->parent->children--;
1665				if (last_child)
1666					htb_parent_to_leaf(sch, cl, NULL);
1667				htb_destroy_class(sch, cl);
1668			}
1669		}
1670	} while (changed);
1671	WARN_ON(nonempty);
1672
1673	qdisc_class_hash_destroy(&q->clhash);
1674	__qdisc_reset_queue(&q->direct_queue);
1675
1676	if (q->offload) {
1677		offload_opt = (struct tc_htb_qopt_offload) {
1678			.command = TC_HTB_DESTROY,
1679		};
1680		htb_offload(dev, &offload_opt);
1681	}
1682
1683	if (!q->direct_qdiscs)
1684		return;
1685	for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1686		qdisc_put(q->direct_qdiscs[i]);
1687	kfree(q->direct_qdiscs);
1688}
1689
1690static int htb_delete(struct Qdisc *sch, unsigned long arg,
1691		      struct netlink_ext_ack *extack)
1692{
1693	struct htb_sched *q = qdisc_priv(sch);
1694	struct htb_class *cl = (struct htb_class *)arg;
1695	struct Qdisc *new_q = NULL;
1696	int last_child = 0;
1697	int err;
1698
1699	/* TODO: why not allow deleting a subtree? references? does the
1700	 * tc subsys guarantee us that in htb_destroy it holds no class
1701	 * refs so that we can remove children safely there?
1702	 */
1703	if (cl->children || qdisc_class_in_use(&cl->common)) {
1704		NL_SET_ERR_MSG(extack, "HTB class in use");
1705		return -EBUSY;
1706	}
1707
1708	if (!cl->level && htb_parent_last_child(cl))
1709		last_child = 1;
1710
1711	if (q->offload) {
1712		err = htb_destroy_class_offload(sch, cl, last_child, false,
1713						extack);
1714		if (err)
1715			return err;
1716	}
1717
1718	if (last_child) {
1719		struct netdev_queue *dev_queue = sch->dev_queue;
1720
1721		if (q->offload)
1722			dev_queue = htb_offload_get_queue(cl);
1723
1724		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1725					  cl->parent->common.classid,
1726					  NULL);
1727		if (q->offload)
1728			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
1729	}
1730
1731	sch_tree_lock(sch);
1732
1733	if (!cl->level)
1734		qdisc_purge_queue(cl->leaf.q);
1735
1736	/* delete from hash and active; remainder in destroy_class */
1737	qdisc_class_hash_remove(&q->clhash, &cl->common);
1738	if (cl->parent)
1739		cl->parent->children--;
1740
1741	if (cl->prio_activity)
1742		htb_deactivate(q, cl);
1743
1744	if (cl->cmode != HTB_CAN_SEND)
1745		htb_safe_rb_erase(&cl->pq_node,
1746				  &q->hlevel[cl->level].wait_pq);
1747
1748	if (last_child)
1749		htb_parent_to_leaf(sch, cl, new_q);
1750
1751	sch_tree_unlock(sch);
1752
1753	htb_destroy_class(sch, cl);
1754	return 0;
1755}
1756
1757static int htb_change_class(struct Qdisc *sch, u32 classid,
1758			    u32 parentid, struct nlattr **tca,
1759			    unsigned long *arg, struct netlink_ext_ack *extack)
1760{
1761	int err = -EINVAL;
1762	struct htb_sched *q = qdisc_priv(sch);
1763	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1764	struct tc_htb_qopt_offload offload_opt;
1765	struct nlattr *opt = tca[TCA_OPTIONS];
1766	struct nlattr *tb[TCA_HTB_MAX + 1];
1767	struct Qdisc *parent_qdisc = NULL;
1768	struct netdev_queue *dev_queue;
1769	struct tc_htb_opt *hopt;
1770	u64 rate64, ceil64;
1771	int warn = 0;
1772
1773	/* extract all subattrs from opt attr */
1774	if (!opt)
1775		goto failure;
1776
1777	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1778					  extack);
1779	if (err < 0)
1780		goto failure;
1781
1782	err = -EINVAL;
1783	if (tb[TCA_HTB_PARMS] == NULL)
1784		goto failure;
1785
1786	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1787
1788	hopt = nla_data(tb[TCA_HTB_PARMS]);
1789	if (!hopt->rate.rate || !hopt->ceil.rate)
1790		goto failure;
1791
1792	if (q->offload) {
1793		/* Options not supported by the offload. */
1794		if (hopt->rate.overhead || hopt->ceil.overhead) {
1795			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
1796			goto failure;
1797		}
1798		if (hopt->rate.mpu || hopt->ceil.mpu) {
1799			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
1800			goto failure;
1801		}
1802	}
1803
1804	/* Keeping backward compatibility with rate_table based iproute2 tc */
1805	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1806		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1807					      NULL));
1808
1809	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1810		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1811					      NULL));
1812
1813	rate64 = nla_get_u64_default(tb[TCA_HTB_RATE64], 0);
1814	ceil64 = nla_get_u64_default(tb[TCA_HTB_CEIL64], 0);
1815
1816	if (!cl) {		/* new class */
1817		struct net_device *dev = qdisc_dev(sch);
1818		struct Qdisc *new_q, *old_q;
1819		int prio;
1820		struct {
1821			struct nlattr		nla;
1822			struct gnet_estimator	opt;
1823		} est = {
1824			.nla = {
1825				.nla_len	= nla_attr_size(sizeof(est.opt)),
1826				.nla_type	= TCA_RATE,
1827			},
1828			.opt = {
1829				/* 4s interval, 16s averaging constant */
1830				.interval	= 2,
1831				.ewma_log	= 2,
1832			},
1833		};
1834
1835		/* check for valid classid */
1836		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1837		    htb_find(classid, sch))
1838			goto failure;
1839
1840		/* check maximal depth */
1841		if (parent && parent->parent && parent->parent->level < 2) {
1842			NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
1843			goto failure;
1844		}
1845		err = -ENOBUFS;
1846		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1847		if (!cl)
1848			goto failure;
1849
1850		gnet_stats_basic_sync_init(&cl->bstats);
1851		gnet_stats_basic_sync_init(&cl->bstats_bias);
1852
1853		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1854		if (err) {
1855			kfree(cl);
1856			goto failure;
1857		}
1858		if (htb_rate_est || tca[TCA_RATE]) {
1859			err = gen_new_estimator(&cl->bstats, NULL,
1860						&cl->rate_est,
1861						NULL,
1862						true,
1863						tca[TCA_RATE] ? : &est.nla);
1864			if (err)
1865				goto err_block_put;
1866		}
1867
1868		cl->children = 0;
1869		RB_CLEAR_NODE(&cl->pq_node);
1870
1871		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1872			RB_CLEAR_NODE(&cl->node[prio]);
1873
1874		cl->common.classid = classid;
1875
1876		/* Make sure nothing interrupts us in between the two
1877		 * ndo_setup_tc calls.
1878		 */
1879		ASSERT_RTNL();
1880
1881		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1882		 * so it can't be used inside of sch_tree_lock
1883		 * -- thanks to Karlis Peisenieks
1884		 */
1885		if (!q->offload) {
1886			dev_queue = sch->dev_queue;
1887		} else if (!(parent && !parent->level)) {
1888			/* Assign a dev_queue to this classid. */
1889			offload_opt = (struct tc_htb_qopt_offload) {
1890				.command = TC_HTB_LEAF_ALLOC_QUEUE,
1891				.classid = cl->common.classid,
1892				.parent_classid = parent ?
1893					TC_H_MIN(parent->common.classid) :
1894					TC_HTB_CLASSID_ROOT,
1895				.rate = max_t(u64, hopt->rate.rate, rate64),
1896				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
1897				.prio = hopt->prio,
1898				.quantum = hopt->quantum,
1899				.extack = extack,
1900			};
1901			err = htb_offload(dev, &offload_opt);
1902			if (err) {
1903				NL_SET_ERR_MSG_WEAK(extack,
1904						    "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
1905				goto err_kill_estimator;
1906			}
1907			dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1908		} else { /* First child. */
1909			dev_queue = htb_offload_get_queue(parent);
1910			old_q = htb_graft_helper(dev_queue, NULL);
1911			WARN_ON(old_q != parent->leaf.q);
1912			offload_opt = (struct tc_htb_qopt_offload) {
1913				.command = TC_HTB_LEAF_TO_INNER,
1914				.classid = cl->common.classid,
1915				.parent_classid =
1916					TC_H_MIN(parent->common.classid),
1917				.rate = max_t(u64, hopt->rate.rate, rate64),
1918				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
1919				.prio = hopt->prio,
1920				.quantum = hopt->quantum,
1921				.extack = extack,
1922			};
1923			err = htb_offload(dev, &offload_opt);
1924			if (err) {
1925				NL_SET_ERR_MSG_WEAK(extack,
1926						    "Failed to offload TC_HTB_LEAF_TO_INNER");
1927				htb_graft_helper(dev_queue, old_q);
1928				goto err_kill_estimator;
1929			}
1930			_bstats_update(&parent->bstats_bias,
1931				       u64_stats_read(&old_q->bstats.bytes),
1932				       u64_stats_read(&old_q->bstats.packets));
1933			qdisc_put(old_q);
1934		}
1935		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1936					  classid, NULL);
1937		if (q->offload) {
1938			/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1939			if (new_q)
1940				qdisc_refcount_inc(new_q);
1941			old_q = htb_graft_helper(dev_queue, new_q);
1942			/* No qdisc_put needed. */
1943			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1944		}
1945		sch_tree_lock(sch);
1946		if (parent && !parent->level) {
1947			/* turn parent into inner node */
1948			qdisc_purge_queue(parent->leaf.q);
1949			parent_qdisc = parent->leaf.q;
1950			if (parent->prio_activity)
1951				htb_deactivate(q, parent);
1952
1953			/* remove from evt list because of level change */
1954			if (parent->cmode != HTB_CAN_SEND) {
1955				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1956				parent->cmode = HTB_CAN_SEND;
1957			}
1958			parent->level = (parent->parent ? parent->parent->level
1959					 : TC_HTB_MAXDEPTH) - 1;
1960			memset(&parent->inner, 0, sizeof(parent->inner));
1961		}
1962
1963		/* leaf (we) needs elementary qdisc */
1964		cl->leaf.q = new_q ? new_q : &noop_qdisc;
1965		if (q->offload)
1966			cl->leaf.offload_queue = dev_queue;
1967
1968		cl->parent = parent;
1969
1970		/* set class to be in HTB_CAN_SEND state */
1971		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1972		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1973		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1974		cl->t_c = ktime_get_ns();
1975		cl->cmode = HTB_CAN_SEND;
1976
1977		/* attach to the hash list and parent's family */
1978		qdisc_class_hash_insert(&q->clhash, &cl->common);
1979		if (parent)
1980			parent->children++;
1981		if (cl->leaf.q != &noop_qdisc)
1982			qdisc_hash_add(cl->leaf.q, true);
1983	} else {
1984		if (tca[TCA_RATE]) {
1985			err = gen_replace_estimator(&cl->bstats, NULL,
1986						    &cl->rate_est,
1987						    NULL,
1988						    true,
1989						    tca[TCA_RATE]);
1990			if (err)
1991				return err;
1992		}
1993
1994		if (q->offload) {
1995			struct net_device *dev = qdisc_dev(sch);
1996
1997			offload_opt = (struct tc_htb_qopt_offload) {
1998				.command = TC_HTB_NODE_MODIFY,
1999				.classid = cl->common.classid,
2000				.rate = max_t(u64, hopt->rate.rate, rate64),
2001				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
2002				.prio = hopt->prio,
2003				.quantum = hopt->quantum,
2004				.extack = extack,
2005			};
2006			err = htb_offload(dev, &offload_opt);
2007			if (err)
2008				/* Estimator was replaced, and rollback may fail
2009				 * as well, so we don't try to recover it, and
2010				 * the estimator won't work properly with the
2011				 * offload anyway, because bstats are updated
2012				 * only when the stats are queried.
2013				 */
2014				return err;
2015		}
2016
2017		sch_tree_lock(sch);
2018	}
2019
2020	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
2021	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
2022
2023	/* there used to be a nasty bug here: we have to check that the node
2024	 * is really a leaf before changing cl->leaf!
2025	 */
2026	if (!cl->level) {
2027		u64 quantum = cl->rate.rate_bytes_ps;
2028
2029		do_div(quantum, q->rate2quantum);
2030		cl->quantum = min_t(u64, quantum, INT_MAX);
2031
2032		if (!hopt->quantum && cl->quantum < 1000) {
2033			warn = -1;
2034			cl->quantum = 1000;
2035		}
2036		if (!hopt->quantum && cl->quantum > 200000) {
2037			warn = 1;
2038			cl->quantum = 200000;
2039		}
2040		if (hopt->quantum)
2041			cl->quantum = hopt->quantum;
2042		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2043			cl->prio = TC_HTB_NUMPRIO - 1;
2044	}
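	/* Worked example (illustrative numbers): with the default r2q of 10
	 * and rate = 1 Mbit/s (125000 bytes/s), the derived quantum is
	 * 125000 / 10 = 12500 bytes. A 10 Gbit/s class would compute
	 * 125000000 bytes and be clamped to 200000 (triggering the
	 * "consider r2q change" warning below), unless an explicit quantum
	 * was supplied.
	 */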
2045
2046	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
2047	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
2048
2049	sch_tree_unlock(sch);
2050	qdisc_put(parent_qdisc);
2051
2052	if (warn)
2053		NL_SET_ERR_MSG_FMT_MOD(extack,
2054				       "quantum of class %X is %s. Consider r2q change.",
2055				       cl->common.classid, (warn == -1 ? "small" : "big"));
2056
2057	qdisc_class_hash_grow(sch, &q->clhash);
2058
2059	*arg = (unsigned long)cl;
2060	return 0;
2061
2062err_kill_estimator:
2063	gen_kill_estimator(&cl->rate_est);
2064err_block_put:
2065	tcf_block_put(cl->block);
2066	kfree(cl);
2067failure:
2068	return err;
2069}
2070
2071static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2072				       struct netlink_ext_ack *extack)
2073{
2074	struct htb_sched *q = qdisc_priv(sch);
2075	struct htb_class *cl = (struct htb_class *)arg;
2076
2077	return cl ? cl->block : q->block;
2078}
2079
2080static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
2081				     u32 classid)
2082{
2083	struct htb_class *cl = htb_find(classid, sch);
2084
2085	/*if (cl && !cl->level) return 0;
2086	 * The line above used to be there to prevent attaching filters to
2087	 * leaves. But at least the tc_index filter uses this just to get the
2088	 * class for other reasons, so we have to allow it.
2089	 * ----
2090	 * 19.6.2002 As Werner explained it is ok - bind filter is just
2091	 * another way to "lock" the class - unlike "get" this lock can
2092	 * be broken by class during destroy IIUC.
2093	 */
2094	if (cl)
2095		qdisc_class_get(&cl->common);
2096	return (unsigned long)cl;
2097}
2098
2099static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2100{
2101	struct htb_class *cl = (struct htb_class *)arg;
2102
2103	qdisc_class_put(&cl->common);
2104}
2105
2106static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2107{
2108	struct htb_sched *q = qdisc_priv(sch);
2109	struct htb_class *cl;
2110	unsigned int i;
2111
2112	if (arg->stop)
2113		return;
2114
2115	for (i = 0; i < q->clhash.hashsize; i++) {
2116		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
2117			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
2118				return;
2119		}
2120	}
2121}
2122
2123static const struct Qdisc_class_ops htb_class_ops = {
2124	.select_queue	=	htb_select_queue,
2125	.graft		=	htb_graft,
2126	.leaf		=	htb_leaf,
2127	.qlen_notify	=	htb_qlen_notify,
2128	.find		=	htb_search,
2129	.change		=	htb_change_class,
2130	.delete		=	htb_delete,
2131	.walk		=	htb_walk,
2132	.tcf_block	=	htb_tcf_block,
2133	.bind_tcf	=	htb_bind_filter,
2134	.unbind_tcf	=	htb_unbind_filter,
2135	.dump		=	htb_dump_class,
2136	.dump_stats	=	htb_dump_class_stats,
2137};
2138
2139static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
2140	.cl_ops		=	&htb_class_ops,
2141	.id		=	"htb",
2142	.priv_size	=	sizeof(struct htb_sched),
2143	.enqueue	=	htb_enqueue,
2144	.dequeue	=	htb_dequeue,
2145	.peek		=	qdisc_peek_dequeued,
2146	.init		=	htb_init,
2147	.attach		=	htb_attach,
2148	.reset		=	htb_reset,
2149	.destroy	=	htb_destroy,
2150	.dump		=	htb_dump,
2151	.owner		=	THIS_MODULE,
2152};
2153MODULE_ALIAS_NET_SCH("htb");
2154
2155static int __init htb_module_init(void)
2156{
2157	return register_qdisc(&htb_qdisc_ops);
2158}
2159static void __exit htb_module_exit(void)
2160{
2161	unregister_qdisc(&htb_qdisc_ops);
2162}
2163
2164module_init(htb_module_init)
2165module_exit(htb_module_exit)
2166MODULE_LICENSE("GPL");
2167MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");