   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
   4 *
   5 * Authors:	Martin Devera, <devik@cdi.cz>
   6 *
   7 * Credits (in time order) for older HTB versions:
   8 *              Stef Coene <stef.coene@docum.org>
   9 *			HTB support at LARTC mailing list
  10 *		Ondrej Kraus, <krauso@barr.cz>
  11 *			found missing INIT_QDISC(htb)
  12 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
  13 *			helped a lot to locate nasty class stall bug
  14 *		Andi Kleen, Jamal Hadi, Bert Hubert
  15 *			code review and helpful comments on shaping
  16 *		Tomasz Wrona, <tw@eter.tym.pl>
  17 *			created test case so that I was able to fix nasty bug
  18 *		Wilfried Weissmann
  19 *			spotted bug in dequeue code and helped with fix
  20 *		Jiri Fojtasek
  21 *			fixed requeue routine
  22 *		and many others. thanks.
  23 */
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <linux/types.h>
  27#include <linux/kernel.h>
  28#include <linux/string.h>
  29#include <linux/errno.h>
  30#include <linux/skbuff.h>
  31#include <linux/list.h>
  32#include <linux/compiler.h>
  33#include <linux/rbtree.h>
  34#include <linux/workqueue.h>
  35#include <linux/slab.h>
  36#include <net/netlink.h>
  37#include <net/sch_generic.h>
  38#include <net/pkt_sched.h>
  39#include <net/pkt_cls.h>
  40
  41/* HTB algorithm.
  42    Author: devik@cdi.cz
  43    ========================================================================
  44    HTB is like TBF with multiple classes. It is also similar to CBQ because
   45    it allows assigning a priority to each class in the hierarchy.
  46    In fact it is another implementation of Floyd's formal sharing.
  47
  48    Levels:
   49    Each class is assigned a level. A leaf ALWAYS has level 0 and root
   50    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
  51    one less than their parent.
  52*/
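/* Illustrative example (editorial addition, not from the original source):
 * assuming TC_HTB_MAXDEPTH == 8, a hierarchy such as
 *
 *	1:1   (root class)         -> level 7 (TC_HTB_MAXDEPTH - 1)
 *	  1:10  (inner class)      -> level 6
 *	    1:101, 1:102 (leaves)  -> level 0
 *
 * could be configured from user space roughly like this (device name and
 * rates are hypothetical, shown only to illustrate the hierarchy):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 101
 *	tc class add dev eth0 parent 1:   classid 1:1   htb rate 100mbit
 *	tc class add dev eth0 parent 1:1  classid 1:10  htb rate 50mbit
 *	tc class add dev eth0 parent 1:10 classid 1:101 htb rate 30mbit ceil 50mbit
 *	tc class add dev eth0 parent 1:10 classid 1:102 htb rate 20mbit ceil 50mbit
 */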
  53
  54static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
  55#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
  56
  57#if HTB_VER >> 16 != TC_HTB_PROTOVER
  58#error "Mismatched sch_htb.c and pkt_sch.h"
  59#endif
  60
  61/* Module parameter and sysfs export */
  62module_param    (htb_hysteresis, int, 0640);
  63MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
  64
   65static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
  66module_param(htb_rate_est, int, 0640);
  67MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
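/* Illustrative usage (editorial addition): because of the 0640 permissions
 * above, both knobs also appear under /sys/module/sch_htb/parameters/, e.g.
 *
 *	modprobe sch_htb htb_hysteresis=1
 *	echo 1 > /sys/module/sch_htb/parameters/htb_rate_est
 */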
  68
   69/* used internally to keep the status of a single class */
  70enum htb_cmode {
  71	HTB_CANT_SEND,		/* class can't send and can't borrow */
  72	HTB_MAY_BORROW,		/* class can't send but may borrow */
  73	HTB_CAN_SEND		/* class can send */
  74};
  75
  76struct htb_prio {
  77	union {
  78		struct rb_root	row;
  79		struct rb_root	feed;
  80	};
  81	struct rb_node	*ptr;
  82	/* When class changes from state 1->2 and disconnects from
   83	 * parent's feed then we lose the ptr value and start from the
  84	 * first child again. Here we store classid of the
  85	 * last valid ptr (used when ptr is NULL).
  86	 */
  87	u32		last_ptr_id;
  88};
  89
  90/* interior & leaf nodes; props specific to leaves are marked L:
  91 * To reduce false sharing, place mostly read fields at beginning,
  92 * and mostly written ones at the end.
  93 */
  94struct htb_class {
  95	struct Qdisc_class_common common;
  96	struct psched_ratecfg	rate;
  97	struct psched_ratecfg	ceil;
  98	s64			buffer, cbuffer;/* token bucket depth/rate */
  99	s64			mbuffer;	/* max wait time */
 100	u32			prio;		/* these two are used only by leaves... */
 101	int			quantum;	/* but stored for parent-to-leaf return */
 102
 103	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
 104	struct tcf_block	*block;
 105
 106	int			level;		/* our level (see above) */
 107	unsigned int		children;
 108	struct htb_class	*parent;	/* parent class */
 109
 110	struct net_rate_estimator __rcu *rate_est;
 111
 112	/*
 113	 * Written often fields
 114	 */
 115	struct gnet_stats_basic_sync bstats;
 116	struct gnet_stats_basic_sync bstats_bias;
 117	struct tc_htb_xstats	xstats;	/* our special stats */
 118
 119	/* token bucket parameters */
 120	s64			tokens, ctokens;/* current number of tokens */
 121	s64			t_c;		/* checkpoint time */
 122
 123	union {
 124		struct htb_class_leaf {
 125			int		deficit[TC_HTB_MAXDEPTH];
 126			struct Qdisc	*q;
 127			struct netdev_queue *offload_queue;
 128		} leaf;
 129		struct htb_class_inner {
 130			struct htb_prio clprio[TC_HTB_NUMPRIO];
 131		} inner;
 132	};
 133	s64			pq_key;
 134
 135	int			prio_activity;	/* for which prios are we active */
 136	enum htb_cmode		cmode;		/* current mode of the class */
 137	struct rb_node		pq_node;	/* node for event queue */
 138	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 139
 140	unsigned int drops ____cacheline_aligned_in_smp;
 141	unsigned int		overlimits;
 142};
 143
 144struct htb_level {
 145	struct rb_root	wait_pq;
 146	struct htb_prio hprio[TC_HTB_NUMPRIO];
 147};
 148
 149struct htb_sched {
 150	struct Qdisc_class_hash clhash;
 151	int			defcls;		/* class where unclassified flows go to */
 152	int			rate2quantum;	/* quant = rate / rate2quantum */
 153
 154	/* filters for qdisc itself */
 155	struct tcf_proto __rcu	*filter_list;
 156	struct tcf_block	*block;
 157
 158#define HTB_WARN_TOOMANYEVENTS	0x1
 159	unsigned int		warned;	/* only one warning */
 160	int			direct_qlen;
 161	struct work_struct	work;
 162
 163	/* non shaped skbs; let them go directly thru */
 164	struct qdisc_skb_head	direct_queue;
 165	u32			direct_pkts;
 166	u32			overlimits;
 167
 168	struct qdisc_watchdog	watchdog;
 169
 170	s64			now;	/* cached dequeue time */
 171
 172	/* time of nearest event per level (row) */
 173	s64			near_ev_cache[TC_HTB_MAXDEPTH];
 174
 175	int			row_mask[TC_HTB_MAXDEPTH];
 176
 177	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
 178
 179	struct Qdisc		**direct_qdiscs;
 180	unsigned int            num_direct_qdiscs;
 181
 182	bool			offload;
 183};
 184
 185/* find class in global hash table using given handle */
 186static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 187{
 188	struct htb_sched *q = qdisc_priv(sch);
 189	struct Qdisc_class_common *clc;
 190
 191	clc = qdisc_class_find(&q->clhash, handle);
 192	if (clc == NULL)
 193		return NULL;
 194	return container_of(clc, struct htb_class, common);
 195}
 196
 197static unsigned long htb_search(struct Qdisc *sch, u32 handle)
 198{
 199	return (unsigned long)htb_find(handle, sch);
 200}
 201
 202#define HTB_DIRECT ((struct htb_class *)-1L)
 203
 204/**
 205 * htb_classify - classify a packet into class
 206 * @skb: the socket buffer
 207 * @sch: the active queue discipline
 208 * @qerr: pointer for returned status code
 209 *
 210 * It returns NULL if the packet should be dropped or -1 if the packet
 211 * should be passed directly thru. In all other cases leaf class is returned.
  212 * We allow direct class selection by classid in skb->priority. Then we examine
  213 * filters in the qdisc and in inner nodes (if a higher filter points to an inner
  214 * node). If we end up with classid MAJOR:0 we enqueue the skb into a special
  215 * internal fifo (direct). These packets then go directly thru. If we still
  216 * have no valid leaf we try to use the MAJOR:default leaf. If that is still
  217 * unsuccessful, we finish and return the direct queue.
 218 */
 219static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 220				      int *qerr)
 221{
 222	struct htb_sched *q = qdisc_priv(sch);
 223	struct htb_class *cl;
 224	struct tcf_result res;
 225	struct tcf_proto *tcf;
 226	int result;
 227
 228	/* allow to select class by setting skb->priority to valid classid;
 229	 * note that nfmark can be used too by attaching filter fw with no
 230	 * rules in it
 231	 */
 232	if (skb->priority == sch->handle)
 233		return HTB_DIRECT;	/* X:0 (direct flow) selected */
 234	cl = htb_find(skb->priority, sch);
 235	if (cl) {
 236		if (cl->level == 0)
 237			return cl;
 238		/* Start with inner filter chain if a non-leaf class is selected */
 239		tcf = rcu_dereference_bh(cl->filter_list);
 240	} else {
 241		tcf = rcu_dereference_bh(q->filter_list);
 242	}
 243
 244	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 245	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
 246#ifdef CONFIG_NET_CLS_ACT
 247		switch (result) {
 248		case TC_ACT_QUEUED:
 249		case TC_ACT_STOLEN:
 250		case TC_ACT_TRAP:
 251			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 252			fallthrough;
 253		case TC_ACT_SHOT:
 254			return NULL;
 255		}
 256#endif
 257		cl = (void *)res.class;
 258		if (!cl) {
 259			if (res.classid == sch->handle)
 260				return HTB_DIRECT;	/* X:0 (direct flow) */
 261			cl = htb_find(res.classid, sch);
 262			if (!cl)
 263				break;	/* filter selected invalid classid */
 264		}
 265		if (!cl->level)
 266			return cl;	/* we hit leaf; return it */
 267
 268		/* we have got inner class; apply inner filter chain */
 269		tcf = rcu_dereference_bh(cl->filter_list);
 270	}
 271	/* classification failed; try to use default class */
 272	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
 273	if (!cl || cl->level)
 274		return HTB_DIRECT;	/* bad default .. this is safe bet */
 275	return cl;
 276}
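/* Illustrative walk-through (editorial addition; handles are an assumed
 * example): with qdisc handle 1:0 and defcls 0x20,
 *  - an skb whose skb->priority equals the qdisc handle (1:0) is sent
 *    HTB_DIRECT, bypassing shaping,
 *  - skb->priority naming an existing leaf such as 1:5 returns that leaf
 *    immediately,
 *  - otherwise the filter chain runs; with no match, htb_find(1:20) is
 *    tried, and if it is missing or an inner class, HTB_DIRECT is used as
 *    the safe bet.
 */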
 277
 278/**
 279 * htb_add_to_id_tree - adds class to the round robin list
 280 * @root: the root of the tree
 281 * @cl: the class to add
  282 * @prio: the given prio in class
 283 *
 284 * Routine adds class to the list (actually tree) sorted by classid.
 285 * Make sure that class is not already on such list for given prio.
 286 */
 287static void htb_add_to_id_tree(struct rb_root *root,
 288			       struct htb_class *cl, int prio)
 289{
 290	struct rb_node **p = &root->rb_node, *parent = NULL;
 291
 292	while (*p) {
 293		struct htb_class *c;
 294		parent = *p;
 295		c = rb_entry(parent, struct htb_class, node[prio]);
 296
 297		if (cl->common.classid > c->common.classid)
 298			p = &parent->rb_right;
 299		else
 300			p = &parent->rb_left;
 301	}
 302	rb_link_node(&cl->node[prio], parent, p);
 303	rb_insert_color(&cl->node[prio], root);
 304}
 305
 306/**
 307 * htb_add_to_wait_tree - adds class to the event queue with delay
 308 * @q: the priority event queue
 309 * @cl: the class to add
 310 * @delay: delay in microseconds
 311 *
 312 * The class is added to priority event queue to indicate that class will
 313 * change its mode in cl->pq_key microseconds. Make sure that class is not
 314 * already in the queue.
 315 */
 316static void htb_add_to_wait_tree(struct htb_sched *q,
 317				 struct htb_class *cl, s64 delay)
 318{
 319	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
 320
 321	cl->pq_key = q->now + delay;
 322	if (cl->pq_key == q->now)
 323		cl->pq_key++;
 324
 325	/* update the nearest event cache */
 326	if (q->near_ev_cache[cl->level] > cl->pq_key)
 327		q->near_ev_cache[cl->level] = cl->pq_key;
 328
 329	while (*p) {
 330		struct htb_class *c;
 331		parent = *p;
 332		c = rb_entry(parent, struct htb_class, pq_node);
 333		if (cl->pq_key >= c->pq_key)
 334			p = &parent->rb_right;
 335		else
 336			p = &parent->rb_left;
 337	}
 338	rb_link_node(&cl->pq_node, parent, p);
 339	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 340}
 341
 342/**
 343 * htb_next_rb_node - finds next node in binary tree
 344 * @n: the current node in binary tree
 345 *
 346 * When we are past last key we return NULL.
 347 * Average complexity is 2 steps per call.
 348 */
 349static inline void htb_next_rb_node(struct rb_node **n)
 350{
 351	*n = rb_next(*n);
 352}
 353
 354/**
 355 * htb_add_class_to_row - add class to its row
 356 * @q: the priority event queue
 357 * @cl: the class to add
 358 * @mask: the given priorities in class in bitmap
 359 *
 360 * The class is added to row at priorities marked in mask.
 361 * It does nothing if mask == 0.
 362 */
 363static inline void htb_add_class_to_row(struct htb_sched *q,
 364					struct htb_class *cl, int mask)
 365{
 366	q->row_mask[cl->level] |= mask;
 367	while (mask) {
 368		int prio = ffz(~mask);
 369		mask &= ~(1 << prio);
 370		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
 371	}
 372}
 373
 374/* If this triggers, it is a bug in this code, but it need not be fatal */
 375static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
 376{
 377	if (RB_EMPTY_NODE(rb)) {
 378		WARN_ON(1);
 379	} else {
 380		rb_erase(rb, root);
 381		RB_CLEAR_NODE(rb);
 382	}
 383}
 384
 385
 386/**
 387 * htb_remove_class_from_row - removes class from its row
 388 * @q: the priority event queue
  389 * @cl: the class to remove
 390 * @mask: the given priorities in class in bitmap
 391 *
 392 * The class is removed from row at priorities marked in mask.
 393 * It does nothing if mask == 0.
 394 */
 395static inline void htb_remove_class_from_row(struct htb_sched *q,
 396						 struct htb_class *cl, int mask)
 397{
 398	int m = 0;
 399	struct htb_level *hlevel = &q->hlevel[cl->level];
 400
 401	while (mask) {
 402		int prio = ffz(~mask);
 403		struct htb_prio *hprio = &hlevel->hprio[prio];
 404
 405		mask &= ~(1 << prio);
 406		if (hprio->ptr == cl->node + prio)
 407			htb_next_rb_node(&hprio->ptr);
 408
 409		htb_safe_rb_erase(cl->node + prio, &hprio->row);
 410		if (!hprio->row.rb_node)
 411			m |= 1 << prio;
 412	}
 413	q->row_mask[cl->level] &= ~m;
 414}
 415
 416/**
  417 * htb_activate_prios - creates active class's feed chain
 418 * @q: the priority event queue
 419 * @cl: the class to activate
 420 *
 421 * The class is connected to ancestors and/or appropriate rows
  422 * for the priorities it is participating in. cl->cmode must be the new
 423 * (activated) mode. It does nothing if cl->prio_activity == 0.
 424 */
 425static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 426{
 427	struct htb_class *p = cl->parent;
 428	long m, mask = cl->prio_activity;
 429
 430	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 431		m = mask;
 432		while (m) {
 433			unsigned int prio = ffz(~m);
 434
 435			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
 436				break;
 437			m &= ~(1 << prio);
 438
 439			if (p->inner.clprio[prio].feed.rb_node)
  440				/* parent already has its feed in use, so
  441				 * reset the bit in mask as the parent is already ok
 442				 */
 443				mask &= ~(1 << prio);
 444
 445			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
 446		}
 447		p->prio_activity |= mask;
 448		cl = p;
 449		p = cl->parent;
 450
 451	}
 452	if (cl->cmode == HTB_CAN_SEND && mask)
 453		htb_add_class_to_row(q, cl, mask);
 454}
 455
 456/**
 457 * htb_deactivate_prios - remove class from feed chain
 458 * @q: the priority event queue
 459 * @cl: the class to deactivate
 460 *
 461 * cl->cmode must represent old mode (before deactivation). It does
 462 * nothing if cl->prio_activity == 0. Class is removed from all feed
 463 * chains and rows.
 464 */
 465static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 466{
 467	struct htb_class *p = cl->parent;
 468	long m, mask = cl->prio_activity;
 469
 470	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 471		m = mask;
 472		mask = 0;
 473		while (m) {
 474			int prio = ffz(~m);
 475			m &= ~(1 << prio);
 476
 477			if (p->inner.clprio[prio].ptr == cl->node + prio) {
 478				/* we are removing child which is pointed to from
 479				 * parent feed - forget the pointer but remember
 480				 * classid
 481				 */
 482				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
 483				p->inner.clprio[prio].ptr = NULL;
 484			}
 485
 486			htb_safe_rb_erase(cl->node + prio,
 487					  &p->inner.clprio[prio].feed);
 488
 489			if (!p->inner.clprio[prio].feed.rb_node)
 490				mask |= 1 << prio;
 491		}
 492
 493		p->prio_activity &= ~mask;
 494		cl = p;
 495		p = cl->parent;
 496
 497	}
 498	if (cl->cmode == HTB_CAN_SEND && mask)
 499		htb_remove_class_from_row(q, cl, mask);
 500}
 501
 502static inline s64 htb_lowater(const struct htb_class *cl)
 503{
 504	if (htb_hysteresis)
 505		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 506	else
 507		return 0;
 508}
 509static inline s64 htb_hiwater(const struct htb_class *cl)
 510{
 511	if (htb_hysteresis)
 512		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
 513	else
 514		return 0;
 515}
 516
 517
 518/**
 519 * htb_class_mode - computes and returns current class mode
 520 * @cl: the target class
 521 * @diff: diff time in microseconds
 522 *
 523 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 524 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 525 * from now to time when cl will change its state.
  526 * Also it is worth noting that class mode doesn't change simply
  527 * at cl->{c,}tokens == 0 but rather there can be a hysteresis of
  528 * the 0 .. -cl->{c,}buffer range. It is meant to limit the number of
  529 * mode transitions per time unit. The speed gain is about 1/6.
 530 */
 531static inline enum htb_cmode
 532htb_class_mode(struct htb_class *cl, s64 *diff)
 533{
 534	s64 toks;
 535
 536	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 537		*diff = -toks;
 538		return HTB_CANT_SEND;
 539	}
 540
 541	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
 542		return HTB_CAN_SEND;
 543
 544	*diff = -toks;
 545	return HTB_MAY_BORROW;
 546}
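/* Worked example (editorial addition, hysteresis off so both waters are 0):
 * with cl->ctokens == -5000 and *diff == 2000, ctokens + diff == -3000 is
 * below htb_lowater(), so the class is HTB_CANT_SEND and *diff becomes 3000
 * (time until the ceil bucket reaches zero again).  If the ceil check passes
 * but cl->tokens + *diff is still below htb_hiwater(), the class cannot send
 * at its own rate yet but may borrow, i.e. HTB_MAY_BORROW.
 */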
 547
 548/**
  549 * htb_change_class_mode - changes class's mode
 550 * @q: the priority event queue
 551 * @cl: the target class
 552 * @diff: diff time in microseconds
 553 *
  554 * This should be the only way to change a class's mode under normal
 555 * circumstances. Routine will update feed lists linkage, change mode
 556 * and add class to the wait event queue if appropriate. New mode should
 557 * be different from old one and cl->pq_key has to be valid if changing
 558 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 559 */
 560static void
 561htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
 562{
 563	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 564
 565	if (new_mode == cl->cmode)
 566		return;
 567
 568	if (new_mode == HTB_CANT_SEND) {
 569		cl->overlimits++;
 570		q->overlimits++;
 571	}
 572
 573	if (cl->prio_activity) {	/* not necessary: speed optimization */
 574		if (cl->cmode != HTB_CANT_SEND)
 575			htb_deactivate_prios(q, cl);
 576		cl->cmode = new_mode;
 577		if (new_mode != HTB_CANT_SEND)
 578			htb_activate_prios(q, cl);
 579	} else
 580		cl->cmode = new_mode;
 581}
 582
 583/**
 584 * htb_activate - inserts leaf cl into appropriate active feeds
 585 * @q: the priority event queue
 586 * @cl: the target class
 587 *
 588 * Routine learns (new) priority of leaf and activates feed chain
 589 * for the prio. It can be called on already active leaf safely.
 590 * It also adds leaf into droplist.
 591 */
 592static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 593{
 594	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
 595
 596	if (!cl->prio_activity) {
 597		cl->prio_activity = 1 << cl->prio;
 598		htb_activate_prios(q, cl);
 599	}
 600}
 601
 602/**
 603 * htb_deactivate - remove leaf cl from active feeds
 604 * @q: the priority event queue
 605 * @cl: the target class
 606 *
  607 * Make sure that the leaf is active. In other words it can't be called
  608 * with a non-active leaf. It also removes the class from the drop list.
 609 */
 610static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 611{
 612	WARN_ON(!cl->prio_activity);
 613
 614	htb_deactivate_prios(q, cl);
 615	cl->prio_activity = 0;
 616}
 617
 618static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 619		       struct sk_buff **to_free)
 620{
 621	int ret;
 622	unsigned int len = qdisc_pkt_len(skb);
 623	struct htb_sched *q = qdisc_priv(sch);
 624	struct htb_class *cl = htb_classify(skb, sch, &ret);
 625
 626	if (cl == HTB_DIRECT) {
 627		/* enqueue to helper queue */
 628		if (q->direct_queue.qlen < q->direct_qlen) {
 629			__qdisc_enqueue_tail(skb, &q->direct_queue);
 630			q->direct_pkts++;
 631		} else {
 632			return qdisc_drop(skb, sch, to_free);
 633		}
 634#ifdef CONFIG_NET_CLS_ACT
 635	} else if (!cl) {
 636		if (ret & __NET_XMIT_BYPASS)
 637			qdisc_qstats_drop(sch);
 638		__qdisc_drop(skb, to_free);
 639		return ret;
 640#endif
 641	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
 642					to_free)) != NET_XMIT_SUCCESS) {
 643		if (net_xmit_drop_count(ret)) {
 644			qdisc_qstats_drop(sch);
 645			cl->drops++;
 646		}
 647		return ret;
 648	} else {
 649		htb_activate(q, cl);
 650	}
 651
 652	sch->qstats.backlog += len;
 653	sch->q.qlen++;
 654	return NET_XMIT_SUCCESS;
 655}
 656
 657static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 658{
 659	s64 toks = diff + cl->tokens;
 660
 661	if (toks > cl->buffer)
 662		toks = cl->buffer;
 663	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
 664	if (toks <= -cl->mbuffer)
 665		toks = 1 - cl->mbuffer;
 666
 667	cl->tokens = toks;
 668}
 669
 670static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 671{
 672	s64 toks = diff + cl->ctokens;
 673
 674	if (toks > cl->cbuffer)
 675		toks = cl->cbuffer;
 676	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
 677	if (toks <= -cl->mbuffer)
 678		toks = 1 - cl->mbuffer;
 679
 680	cl->ctokens = toks;
 681}
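/* Illustrative arithmetic (editorial addition, assumed numbers): with a
 * 1 Gbit/s ceil, psched_l2t_ns() charges roughly 8000 ns for a 1000-byte
 * packet.  Starting from ctokens == 20000 and diff == 4000, toks becomes
 * 20000 + 4000 - 8000 == 16000.  The clamp to cl->cbuffer only matters after
 * long idle periods, and the clamp to 1 - cl->mbuffer bounds how far into
 * debt a class may go.
 */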
 682
 683/**
 684 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 685 * @q: the priority event queue
  686 * @cl: the class to start iterating from
 687 * @level: the minimum level to account
 688 * @skb: the socket buffer
 689 *
 690 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 691 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 692 * leaf and all ancestors and to rate bucket for ancestors at levels
 693 * "level" and higher. It also handles possible change of mode resulting
 694 * from the update. Note that mode can also increase here (MAY_BORROW to
  695 * CAN_SEND) because we can use a more precise clock than the event queue here.
  696 * In such a case we remove the class from the event queue first.
 697 */
 698static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 699			     int level, struct sk_buff *skb)
 700{
 701	int bytes = qdisc_pkt_len(skb);
 702	enum htb_cmode old_mode;
 703	s64 diff;
 704
 705	while (cl) {
 706		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 707		if (cl->level >= level) {
 708			if (cl->level == level)
 709				cl->xstats.lends++;
 710			htb_accnt_tokens(cl, bytes, diff);
 711		} else {
 712			cl->xstats.borrows++;
 713			cl->tokens += diff;	/* we moved t_c; update tokens */
 714		}
 715		htb_accnt_ctokens(cl, bytes, diff);
 716		cl->t_c = q->now;
 717
 718		old_mode = cl->cmode;
 719		diff = 0;
 720		htb_change_class_mode(q, cl, &diff);
 721		if (old_mode != cl->cmode) {
 722			if (old_mode != HTB_CAN_SEND)
 723				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 724			if (cl->cmode != HTB_CAN_SEND)
 725				htb_add_to_wait_tree(q, cl, diff);
 726		}
 727
 728		/* update basic stats except for leaves which are already updated */
 729		if (cl->level)
 730			bstats_update(&cl->bstats, skb);
 731
 732		cl = cl->parent;
 733	}
 734}
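/* Illustrative propagation (editorial addition): if a leaf (level 0)
 * borrowed from an ancestor at level 2, the leaf and its level-1 parent only
 * pay into their ceil buckets and count a borrow, the level-2 ancestor pays
 * into both buckets and counts a lend, and ancestors above level 2 pay into
 * both buckets without counting a lend.
 */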
 735
 736/**
 737 * htb_do_events - make mode changes to classes at the level
 738 * @q: the priority event queue
 739 * @level: which wait_pq in 'q->hlevel'
 740 * @start: start jiffies
 741 *
 742 * Scans event queue for pending events and applies them. Returns time of
 743 * next pending event (0 for no event in pq, q->now for too many events).
  744 * Note: Applied are events whose cl->pq_key <= q->now.
 745 */
 746static s64 htb_do_events(struct htb_sched *q, const int level,
 747			 unsigned long start)
 748{
 749	/* don't run for longer than 2 jiffies; 2 is used instead of
 750	 * 1 to simplify things when jiffy is going to be incremented
 751	 * too soon
 752	 */
 753	unsigned long stop_at = start + 2;
 754	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
 755
 756	while (time_before(jiffies, stop_at)) {
 757		struct htb_class *cl;
 758		s64 diff;
 759		struct rb_node *p = rb_first(wait_pq);
 760
 761		if (!p)
 762			return 0;
 763
 764		cl = rb_entry(p, struct htb_class, pq_node);
 765		if (cl->pq_key > q->now)
 766			return cl->pq_key;
 767
 768		htb_safe_rb_erase(p, wait_pq);
 769		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 770		htb_change_class_mode(q, cl, &diff);
 771		if (cl->cmode != HTB_CAN_SEND)
 772			htb_add_to_wait_tree(q, cl, diff);
 773	}
 774
 775	/* too much load - let's continue after a break for scheduling */
 776	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
 777		pr_warn("htb: too many events!\n");
 778		q->warned |= HTB_WARN_TOOMANYEVENTS;
 779	}
 780
 781	return q->now;
 782}
 783
  784/* Returns class->node+prio from the id-tree where the class's id is >= id.
  785 * NULL if no such one exists.
 786 */
 787static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 788					      u32 id)
 789{
 790	struct rb_node *r = NULL;
 791	while (n) {
 792		struct htb_class *cl =
 793		    rb_entry(n, struct htb_class, node[prio]);
 794
 795		if (id > cl->common.classid) {
 796			n = n->rb_right;
 797		} else if (id < cl->common.classid) {
 798			r = n;
 799			n = n->rb_left;
 800		} else {
 801			return n;
 802		}
 803	}
 804	return r;
 805}
 806
 807/**
 808 * htb_lookup_leaf - returns next leaf class in DRR order
 809 * @hprio: the current one
 810 * @prio: which prio in class
 811 *
  812 * Find the leaf that the current feed pointer points to.
 813 */
 814static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 815{
 816	int i;
 817	struct {
 818		struct rb_node *root;
 819		struct rb_node **pptr;
 820		u32 *pid;
 821	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 822
 823	BUG_ON(!hprio->row.rb_node);
 824	sp->root = hprio->row.rb_node;
 825	sp->pptr = &hprio->ptr;
 826	sp->pid = &hprio->last_ptr_id;
 827
 828	for (i = 0; i < 65535; i++) {
 829		if (!*sp->pptr && *sp->pid) {
 830			/* ptr was invalidated but id is valid - try to recover
 831			 * the original or next ptr
 832			 */
 833			*sp->pptr =
 834			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 835		}
  836	*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
 837				 * can become out of date quickly
 838				 */
 839		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 840			*sp->pptr = sp->root;
 841			while ((*sp->pptr)->rb_left)
 842				*sp->pptr = (*sp->pptr)->rb_left;
 843			if (sp > stk) {
 844				sp--;
 845				if (!*sp->pptr) {
 846					WARN_ON(1);
 847					return NULL;
 848				}
 849				htb_next_rb_node(sp->pptr);
 850			}
 851		} else {
 852			struct htb_class *cl;
 853			struct htb_prio *clp;
 854
 855			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 856			if (!cl->level)
 857				return cl;
 858			clp = &cl->inner.clprio[prio];
 859			(++sp)->root = clp->feed.rb_node;
 860			sp->pptr = &clp->ptr;
 861			sp->pid = &clp->last_ptr_id;
 862		}
 863	}
 864	WARN_ON(1);
 865	return NULL;
 866}
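/* Editorial summary (not from the original comments): the walk starts at the
 * row tree of the given level and follows each inner class's clprio[prio].ptr
 * into its feed tree until a level-0 class is reached.  When a ptr was
 * cleared by a deactivating child, last_ptr_id lets the walk resume at the
 * next classid >= the remembered one via htb_id_find_next_upper().
 */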
 867
 868/* dequeues packet at given priority and level; call only if
 869 * you are sure that there is active class at prio/level
 870 */
 871static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
 872					const int level)
 873{
 874	struct sk_buff *skb = NULL;
 875	struct htb_class *cl, *start;
 876	struct htb_level *hlevel = &q->hlevel[level];
 877	struct htb_prio *hprio = &hlevel->hprio[prio];
 878
 879	/* look initial class up in the row */
 880	start = cl = htb_lookup_leaf(hprio, prio);
 881
 882	do {
 883next:
 884		if (unlikely(!cl))
 885			return NULL;
 886
 887		/* class can be empty - it is unlikely but can be true if leaf
 888		 * qdisc drops packets in enqueue routine or if someone used
 889		 * graft operation on the leaf since last dequeue;
 890		 * simply deactivate and skip such class
 891		 */
 892		if (unlikely(cl->leaf.q->q.qlen == 0)) {
 893			struct htb_class *next;
 894			htb_deactivate(q, cl);
 895
 896			/* row/level might become empty */
 897			if ((q->row_mask[level] & (1 << prio)) == 0)
 898				return NULL;
 899
 900			next = htb_lookup_leaf(hprio, prio);
 901
 902			if (cl == start)	/* fix start if we just deleted it */
 903				start = next;
 904			cl = next;
 905			goto next;
 906		}
 907
 908		skb = cl->leaf.q->dequeue(cl->leaf.q);
 909		if (likely(skb != NULL))
 910			break;
 911
 912		qdisc_warn_nonwc("htb", cl->leaf.q);
 913		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
 914					 &q->hlevel[0].hprio[prio].ptr);
 915		cl = htb_lookup_leaf(hprio, prio);
 916
 917	} while (cl != start);
 918
 919	if (likely(skb != NULL)) {
 920		bstats_update(&cl->bstats, skb);
 921		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
 922		if (cl->leaf.deficit[level] < 0) {
 923			cl->leaf.deficit[level] += cl->quantum;
 924			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
 925						 &q->hlevel[0].hprio[prio].ptr);
 926		}
  927		/* this used to be after charge_class but this constellation
 928		 * gives us slightly better performance
 929		 */
 930		if (!cl->leaf.q->q.qlen)
 931			htb_deactivate(q, cl);
 932		htb_charge_class(q, cl, level, skb);
 933	}
 934	return skb;
 935}
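/* Illustrative DRR behaviour (editorial addition, assumed numbers): with
 * cl->quantum == 1500, a deficit of 0 and 1000-byte packets, the first
 * dequeue drives deficit[level] to -1000, so quantum is added back (deficit
 * becomes 500) and the feed pointer advances to the next class; over many
 * rounds each active class therefore sends roughly quantum bytes per round
 * at a given prio/level.
 */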
 936
 937static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 938{
 939	struct sk_buff *skb;
 940	struct htb_sched *q = qdisc_priv(sch);
 941	int level;
 942	s64 next_event;
 943	unsigned long start_at;
 944
 945	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 946	skb = __qdisc_dequeue_head(&q->direct_queue);
 947	if (skb != NULL) {
 948ok:
 949		qdisc_bstats_update(sch, skb);
 950		qdisc_qstats_backlog_dec(sch, skb);
 951		sch->q.qlen--;
 952		return skb;
 953	}
 954
 955	if (!sch->q.qlen)
 956		goto fin;
 957	q->now = ktime_get_ns();
 958	start_at = jiffies;
 959
 960	next_event = q->now + 5LLU * NSEC_PER_SEC;
 961
 962	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 963		/* common case optimization - skip event handler quickly */
 964		int m;
 965		s64 event = q->near_ev_cache[level];
 966
 967		if (q->now >= event) {
 968			event = htb_do_events(q, level, start_at);
 969			if (!event)
 970				event = q->now + NSEC_PER_SEC;
 971			q->near_ev_cache[level] = event;
 972		}
 973
 974		if (next_event > event)
 975			next_event = event;
 976
 977		m = ~q->row_mask[level];
 978		while (m != (int)(-1)) {
 979			int prio = ffz(m);
 980
 981			m |= 1 << prio;
 982			skb = htb_dequeue_tree(q, prio, level);
 983			if (likely(skb != NULL))
 984				goto ok;
 985		}
 986	}
 987	if (likely(next_event > q->now))
 988		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
 989	else
 990		schedule_work(&q->work);
 991fin:
 992	return skb;
 993}
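/* Editorial summary (not from the original comments): direct packets are
 * served first, then levels are scanned from 0 upwards, so classes sending
 * within their own rate are preferred over classes that have to borrow;
 * within a level the numerically lowest active prio wins.  When nothing can
 * be sent, the watchdog (or the work item if the next event is already due)
 * ensures dequeue runs again around next_event.
 */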
 994
 995/* reset all classes */
  996/* always called under BH & queue lock */
 997static void htb_reset(struct Qdisc *sch)
 998{
 999	struct htb_sched *q = qdisc_priv(sch);
1000	struct htb_class *cl;
1001	unsigned int i;
1002
1003	for (i = 0; i < q->clhash.hashsize; i++) {
1004		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1005			if (cl->level)
1006				memset(&cl->inner, 0, sizeof(cl->inner));
1007			else {
1008				if (cl->leaf.q && !q->offload)
1009					qdisc_reset(cl->leaf.q);
1010			}
1011			cl->prio_activity = 0;
1012			cl->cmode = HTB_CAN_SEND;
1013		}
1014	}
1015	qdisc_watchdog_cancel(&q->watchdog);
1016	__qdisc_reset_queue(&q->direct_queue);
1017	memset(q->hlevel, 0, sizeof(q->hlevel));
1018	memset(q->row_mask, 0, sizeof(q->row_mask));
1019}
1020
1021static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1022	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
1023	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
1024	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1025	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1026	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
1027	[TCA_HTB_RATE64] = { .type = NLA_U64 },
1028	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
1029	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
1030};
1031
1032static void htb_work_func(struct work_struct *work)
1033{
1034	struct htb_sched *q = container_of(work, struct htb_sched, work);
1035	struct Qdisc *sch = q->watchdog.qdisc;
1036
1037	rcu_read_lock();
1038	__netif_schedule(qdisc_root(sch));
1039	rcu_read_unlock();
1040}
1041
1042static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
1043{
1044	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
1045}
1046
1047static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1048		    struct netlink_ext_ack *extack)
1049{
1050	struct net_device *dev = qdisc_dev(sch);
1051	struct tc_htb_qopt_offload offload_opt;
1052	struct htb_sched *q = qdisc_priv(sch);
1053	struct nlattr *tb[TCA_HTB_MAX + 1];
1054	struct tc_htb_glob *gopt;
1055	unsigned int ntx;
1056	bool offload;
1057	int err;
1058
1059	qdisc_watchdog_init(&q->watchdog, sch);
1060	INIT_WORK(&q->work, htb_work_func);
1061
1062	if (!opt)
1063		return -EINVAL;
1064
1065	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1066	if (err)
1067		return err;
1068
1069	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1070					  NULL);
1071	if (err < 0)
1072		return err;
1073
1074	if (!tb[TCA_HTB_INIT])
1075		return -EINVAL;
1076
1077	gopt = nla_data(tb[TCA_HTB_INIT]);
1078	if (gopt->version != HTB_VER >> 16)
1079		return -EINVAL;
1080
1081	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
1082
1083	if (offload) {
1084		if (sch->parent != TC_H_ROOT) {
1085			NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
1086			return -EOPNOTSUPP;
1087		}
1088
1089		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
1090			NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
1091			return -EOPNOTSUPP;
1092		}
1093
1094		q->num_direct_qdiscs = dev->real_num_tx_queues;
1095		q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
1096					   sizeof(*q->direct_qdiscs),
1097					   GFP_KERNEL);
1098		if (!q->direct_qdiscs)
1099			return -ENOMEM;
1100	}
1101
1102	err = qdisc_class_hash_init(&q->clhash);
1103	if (err < 0)
1104		return err;
1105
1106	if (tb[TCA_HTB_DIRECT_QLEN])
1107		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1108	else
1109		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1110
1111	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1112		q->rate2quantum = 1;
1113	q->defcls = gopt->defcls;
1114
1115	if (!offload)
1116		return 0;
1117
1118	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1119		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1120		struct Qdisc *qdisc;
1121
1122		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1123					  TC_H_MAKE(sch->handle, 0), extack);
1124		if (!qdisc) {
1125			return -ENOMEM;
1126		}
1127
1128		q->direct_qdiscs[ntx] = qdisc;
1129		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1130	}
1131
1132	sch->flags |= TCQ_F_MQROOT;
1133
1134	offload_opt = (struct tc_htb_qopt_offload) {
1135		.command = TC_HTB_CREATE,
1136		.parent_classid = TC_H_MAJ(sch->handle) >> 16,
1137		.classid = TC_H_MIN(q->defcls),
1138		.extack = extack,
1139	};
1140	err = htb_offload(dev, &offload_opt);
1141	if (err)
1142		return err;
1143
1144	/* Defer this assignment, so that htb_destroy skips offload-related
1145	 * parts (especially calling ndo_setup_tc) on errors.
1146	 */
1147	q->offload = true;
1148
1149	return 0;
1150}
1151
1152static void htb_attach_offload(struct Qdisc *sch)
1153{
1154	struct net_device *dev = qdisc_dev(sch);
1155	struct htb_sched *q = qdisc_priv(sch);
1156	unsigned int ntx;
1157
1158	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1159		struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1160
1161		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1162		qdisc_put(old);
1163		qdisc_hash_add(qdisc, false);
1164	}
1165	for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1166		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1167		struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1168
1169		qdisc_put(old);
1170	}
1171
1172	kfree(q->direct_qdiscs);
1173	q->direct_qdiscs = NULL;
1174}
1175
1176static void htb_attach_software(struct Qdisc *sch)
1177{
1178	struct net_device *dev = qdisc_dev(sch);
1179	unsigned int ntx;
1180
1181	/* Resemble qdisc_graft behavior. */
1182	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1183		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1184		struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1185
1186		qdisc_refcount_inc(sch);
1187
1188		qdisc_put(old);
1189	}
1190}
1191
1192static void htb_attach(struct Qdisc *sch)
1193{
1194	struct htb_sched *q = qdisc_priv(sch);
1195
1196	if (q->offload)
1197		htb_attach_offload(sch);
1198	else
1199		htb_attach_software(sch);
1200}
1201
1202static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1203{
1204	struct htb_sched *q = qdisc_priv(sch);
1205	struct nlattr *nest;
1206	struct tc_htb_glob gopt;
1207
1208	if (q->offload)
1209		sch->flags |= TCQ_F_OFFLOADED;
1210	else
1211		sch->flags &= ~TCQ_F_OFFLOADED;
1212
1213	sch->qstats.overlimits = q->overlimits;
 1214	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
 1215	 * no change can happen to the qdisc parameters.
1216	 */
1217
1218	gopt.direct_pkts = q->direct_pkts;
1219	gopt.version = HTB_VER;
1220	gopt.rate2quantum = q->rate2quantum;
1221	gopt.defcls = q->defcls;
1222	gopt.debug = 0;
1223
1224	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1225	if (nest == NULL)
1226		goto nla_put_failure;
1227	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1228	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1229		goto nla_put_failure;
1230	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1231		goto nla_put_failure;
1232
1233	return nla_nest_end(skb, nest);
1234
1235nla_put_failure:
1236	nla_nest_cancel(skb, nest);
1237	return -1;
1238}
1239
1240static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1241			  struct sk_buff *skb, struct tcmsg *tcm)
1242{
1243	struct htb_class *cl = (struct htb_class *)arg;
1244	struct htb_sched *q = qdisc_priv(sch);
1245	struct nlattr *nest;
1246	struct tc_htb_opt opt;
1247
 1248	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
 1249	 * no change can happen to the class parameters.
1250	 */
1251	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1252	tcm->tcm_handle = cl->common.classid;
1253	if (!cl->level && cl->leaf.q)
1254		tcm->tcm_info = cl->leaf.q->handle;
1255
1256	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1257	if (nest == NULL)
1258		goto nla_put_failure;
1259
1260	memset(&opt, 0, sizeof(opt));
1261
1262	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1263	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1264	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1265	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1266	opt.quantum = cl->quantum;
1267	opt.prio = cl->prio;
1268	opt.level = cl->level;
1269	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1270		goto nla_put_failure;
1271	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1272		goto nla_put_failure;
1273	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1274	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1275			      TCA_HTB_PAD))
1276		goto nla_put_failure;
1277	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1278	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1279			      TCA_HTB_PAD))
1280		goto nla_put_failure;
1281
1282	return nla_nest_end(skb, nest);
1283
1284nla_put_failure:
1285	nla_nest_cancel(skb, nest);
1286	return -1;
1287}
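/* Editorial note: TCA_HTB_RATE64 / TCA_HTB_CEIL64 are only emitted above when
 * the configured rate no longer fits the 32-bit tc_ratespec field, i.e. from
 * 2^32 bytes/s (roughly 34 Gbit/s) upwards; below that the legacy 32-bit
 * fields inside TCA_HTB_PARMS carry the full value.
 */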
1288
1289static void htb_offload_aggregate_stats(struct htb_sched *q,
1290					struct htb_class *cl)
1291{
1292	u64 bytes = 0, packets = 0;
1293	struct htb_class *c;
1294	unsigned int i;
1295
1296	gnet_stats_basic_sync_init(&cl->bstats);
1297
1298	for (i = 0; i < q->clhash.hashsize; i++) {
1299		hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1300			struct htb_class *p = c;
1301
1302			while (p && p->level < cl->level)
1303				p = p->parent;
1304
1305			if (p != cl)
1306				continue;
1307
1308			bytes += u64_stats_read(&c->bstats_bias.bytes);
1309			packets += u64_stats_read(&c->bstats_bias.packets);
1310			if (c->level == 0) {
1311				bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
1312				packets += u64_stats_read(&c->leaf.q->bstats.packets);
1313			}
1314		}
1315	}
1316	_bstats_update(&cl->bstats, bytes, packets);
1317}
1318
1319static int
1320htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1321{
1322	struct htb_class *cl = (struct htb_class *)arg;
1323	struct htb_sched *q = qdisc_priv(sch);
1324	struct gnet_stats_queue qs = {
1325		.drops = cl->drops,
1326		.overlimits = cl->overlimits,
1327	};
1328	__u32 qlen = 0;
1329
1330	if (!cl->level && cl->leaf.q)
1331		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1332
1333	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1334				    INT_MIN, INT_MAX);
1335	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1336				     INT_MIN, INT_MAX);
1337
1338	if (q->offload) {
1339		if (!cl->level) {
1340			if (cl->leaf.q)
1341				cl->bstats = cl->leaf.q->bstats;
1342			else
1343				gnet_stats_basic_sync_init(&cl->bstats);
1344			_bstats_update(&cl->bstats,
1345				       u64_stats_read(&cl->bstats_bias.bytes),
1346				       u64_stats_read(&cl->bstats_bias.packets));
1347		} else {
1348			htb_offload_aggregate_stats(q, cl);
1349		}
1350	}
1351
1352	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
1353	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1354	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1355		return -1;
1356
1357	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1358}
1359
1360static struct netdev_queue *
1361htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1362{
1363	struct net_device *dev = qdisc_dev(sch);
1364	struct tc_htb_qopt_offload offload_opt;
1365	struct htb_sched *q = qdisc_priv(sch);
1366	int err;
1367
1368	if (!q->offload)
1369		return sch->dev_queue;
1370
1371	offload_opt = (struct tc_htb_qopt_offload) {
1372		.command = TC_HTB_LEAF_QUERY_QUEUE,
1373		.classid = TC_H_MIN(tcm->tcm_parent),
1374	};
1375	err = htb_offload(dev, &offload_opt);
1376	if (err || offload_opt.qid >= dev->num_tx_queues)
1377		return NULL;
1378	return netdev_get_tx_queue(dev, offload_opt.qid);
1379}
1380
1381static struct Qdisc *
1382htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1383{
1384	struct net_device *dev = dev_queue->dev;
1385	struct Qdisc *old_q;
1386
1387	if (dev->flags & IFF_UP)
1388		dev_deactivate(dev);
1389	old_q = dev_graft_qdisc(dev_queue, new_q);
1390	if (new_q)
1391		new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1392	if (dev->flags & IFF_UP)
1393		dev_activate(dev);
1394
1395	return old_q;
1396}
1397
1398static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
1399{
1400	struct netdev_queue *queue;
1401
1402	queue = cl->leaf.offload_queue;
1403	if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
1404		WARN_ON(cl->leaf.q->dev_queue != queue);
1405
1406	return queue;
1407}
1408
1409static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
1410				   struct htb_class *cl_new, bool destroying)
1411{
1412	struct netdev_queue *queue_old, *queue_new;
1413	struct net_device *dev = qdisc_dev(sch);
1414
1415	queue_old = htb_offload_get_queue(cl_old);
1416	queue_new = htb_offload_get_queue(cl_new);
1417
1418	if (!destroying) {
1419		struct Qdisc *qdisc;
1420
1421		if (dev->flags & IFF_UP)
1422			dev_deactivate(dev);
1423		qdisc = dev_graft_qdisc(queue_old, NULL);
1424		WARN_ON(qdisc != cl_old->leaf.q);
1425	}
1426
1427	if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
1428		cl_old->leaf.q->dev_queue = queue_new;
1429	cl_old->leaf.offload_queue = queue_new;
1430
1431	if (!destroying) {
1432		struct Qdisc *qdisc;
1433
1434		qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
1435		if (dev->flags & IFF_UP)
1436			dev_activate(dev);
1437		WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1438	}
1439}
1440
1441static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1442		     struct Qdisc **old, struct netlink_ext_ack *extack)
1443{
1444	struct netdev_queue *dev_queue = sch->dev_queue;
1445	struct htb_class *cl = (struct htb_class *)arg;
1446	struct htb_sched *q = qdisc_priv(sch);
1447	struct Qdisc *old_q;
1448
1449	if (cl->level)
1450		return -EINVAL;
1451
1452	if (q->offload)
1453		dev_queue = htb_offload_get_queue(cl);
1454
1455	if (!new) {
1456		new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1457					cl->common.classid, extack);
1458		if (!new)
1459			return -ENOBUFS;
1460	}
1461
1462	if (q->offload) {
1463		/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1464		qdisc_refcount_inc(new);
1465		old_q = htb_graft_helper(dev_queue, new);
1466	}
1467
1468	*old = qdisc_replace(sch, new, &cl->leaf.q);
1469
1470	if (q->offload) {
1471		WARN_ON(old_q != *old);
1472		qdisc_put(old_q);
1473	}
1474
1475	return 0;
1476}
1477
1478static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1479{
1480	struct htb_class *cl = (struct htb_class *)arg;
1481	return !cl->level ? cl->leaf.q : NULL;
1482}
1483
1484static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1485{
1486	struct htb_class *cl = (struct htb_class *)arg;
1487
1488	htb_deactivate(qdisc_priv(sch), cl);
1489}
1490
1491static inline int htb_parent_last_child(struct htb_class *cl)
1492{
1493	if (!cl->parent)
1494		/* the root class */
1495		return 0;
1496	if (cl->parent->children > 1)
1497		/* not the last child */
1498		return 0;
1499	return 1;
1500}
1501
1502static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
1503			       struct Qdisc *new_q)
1504{
1505	struct htb_sched *q = qdisc_priv(sch);
1506	struct htb_class *parent = cl->parent;
1507
1508	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1509
1510	if (parent->cmode != HTB_CAN_SEND)
1511		htb_safe_rb_erase(&parent->pq_node,
1512				  &q->hlevel[parent->level].wait_pq);
1513
1514	parent->level = 0;
1515	memset(&parent->inner, 0, sizeof(parent->inner));
1516	parent->leaf.q = new_q ? new_q : &noop_qdisc;
1517	parent->tokens = parent->buffer;
1518	parent->ctokens = parent->cbuffer;
1519	parent->t_c = ktime_get_ns();
1520	parent->cmode = HTB_CAN_SEND;
1521	if (q->offload)
1522		parent->leaf.offload_queue = cl->leaf.offload_queue;
1523}
1524
1525static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1526				       struct netdev_queue *dev_queue,
1527				       struct Qdisc *new_q)
1528{
1529	struct Qdisc *old_q;
1530
1531	/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1532	if (new_q)
1533		qdisc_refcount_inc(new_q);
1534	old_q = htb_graft_helper(dev_queue, new_q);
1535	WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1536}
1537
1538static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1539				     bool last_child, bool destroying,
1540				     struct netlink_ext_ack *extack)
1541{
1542	struct tc_htb_qopt_offload offload_opt;
1543	struct netdev_queue *dev_queue;
1544	struct Qdisc *q = cl->leaf.q;
1545	struct Qdisc *old;
1546	int err;
1547
1548	if (cl->level)
1549		return -EINVAL;
1550
1551	WARN_ON(!q);
1552	dev_queue = htb_offload_get_queue(cl);
1553	/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
1554	 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
1555	 * does not need to graft or qdisc_put the qdisc being destroyed.
1556	 */
1557	if (!destroying) {
1558		old = htb_graft_helper(dev_queue, NULL);
1559		/* Last qdisc grafted should be the same as cl->leaf.q when
1560		 * calling htb_delete.
1561		 */
1562		WARN_ON(old != q);
1563	}
1564
1565	if (cl->parent) {
1566		_bstats_update(&cl->parent->bstats_bias,
1567			       u64_stats_read(&q->bstats.bytes),
1568			       u64_stats_read(&q->bstats.packets));
1569	}
1570
1571	offload_opt = (struct tc_htb_qopt_offload) {
1572		.command = !last_child ? TC_HTB_LEAF_DEL :
1573			   destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1574			   TC_HTB_LEAF_DEL_LAST,
1575		.classid = cl->common.classid,
1576		.extack = extack,
1577	};
1578	err = htb_offload(qdisc_dev(sch), &offload_opt);
1579
1580	if (!destroying) {
1581		if (!err)
1582			qdisc_put(old);
1583		else
1584			htb_graft_helper(dev_queue, old);
1585	}
1586
1587	if (last_child)
1588		return err;
1589
1590	if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
1591		u32 classid = TC_H_MAJ(sch->handle) |
1592			      TC_H_MIN(offload_opt.classid);
1593		struct htb_class *moved_cl = htb_find(classid, sch);
1594
1595		htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
1596	}
1597
1598	return err;
1599}
1600
1601static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1602{
1603	if (!cl->level) {
1604		WARN_ON(!cl->leaf.q);
1605		qdisc_put(cl->leaf.q);
1606	}
1607	gen_kill_estimator(&cl->rate_est);
1608	tcf_block_put(cl->block);
1609	kfree(cl);
1610}
1611
1612static void htb_destroy(struct Qdisc *sch)
1613{
1614	struct net_device *dev = qdisc_dev(sch);
1615	struct tc_htb_qopt_offload offload_opt;
1616	struct htb_sched *q = qdisc_priv(sch);
1617	struct hlist_node *next;
1618	bool nonempty, changed;
1619	struct htb_class *cl;
1620	unsigned int i;
1621
1622	cancel_work_sync(&q->work);
1623	qdisc_watchdog_cancel(&q->watchdog);
1624	/* This line used to be after htb_destroy_class call below
1625	 * and surprisingly it worked in 2.4. But it must precede it
1626	 * because filter need its target class alive to be able to call
 1627	 * because filters need their target class alive to be able to call
1628	 */
1629	tcf_block_put(q->block);
1630
1631	for (i = 0; i < q->clhash.hashsize; i++) {
1632		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1633			tcf_block_put(cl->block);
1634			cl->block = NULL;
1635		}
1636	}
1637
1638	do {
1639		nonempty = false;
1640		changed = false;
1641		for (i = 0; i < q->clhash.hashsize; i++) {
1642			hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1643						  common.hnode) {
1644				bool last_child;
1645
1646				if (!q->offload) {
1647					htb_destroy_class(sch, cl);
1648					continue;
1649				}
1650
1651				nonempty = true;
1652
1653				if (cl->level)
1654					continue;
1655
1656				changed = true;
1657
1658				last_child = htb_parent_last_child(cl);
1659				htb_destroy_class_offload(sch, cl, last_child,
1660							  true, NULL);
1661				qdisc_class_hash_remove(&q->clhash,
1662							&cl->common);
1663				if (cl->parent)
1664					cl->parent->children--;
1665				if (last_child)
1666					htb_parent_to_leaf(sch, cl, NULL);
1667				htb_destroy_class(sch, cl);
1668			}
1669		}
1670	} while (changed);
1671	WARN_ON(nonempty);
1672
1673	qdisc_class_hash_destroy(&q->clhash);
1674	__qdisc_reset_queue(&q->direct_queue);
1675
1676	if (q->offload) {
1677		offload_opt = (struct tc_htb_qopt_offload) {
1678			.command = TC_HTB_DESTROY,
1679		};
1680		htb_offload(dev, &offload_opt);
1681	}
1682
1683	if (!q->direct_qdiscs)
1684		return;
1685	for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1686		qdisc_put(q->direct_qdiscs[i]);
1687	kfree(q->direct_qdiscs);
1688}
1689
1690static int htb_delete(struct Qdisc *sch, unsigned long arg,
1691		      struct netlink_ext_ack *extack)
1692{
1693	struct htb_sched *q = qdisc_priv(sch);
1694	struct htb_class *cl = (struct htb_class *)arg;
1695	struct Qdisc *new_q = NULL;
1696	int last_child = 0;
1697	int err;
1698
 1699	/* TODO: why don't we allow deleting a subtree? references? does the
 1700	 * tc subsys guarantee us that in htb_destroy it holds no class
 1701	 * refs so that we can remove children safely there?
1702	 */
1703	if (cl->children || qdisc_class_in_use(&cl->common)) {
1704		NL_SET_ERR_MSG(extack, "HTB class in use");
1705		return -EBUSY;
1706	}
1707
1708	if (!cl->level && htb_parent_last_child(cl))
1709		last_child = 1;
1710
1711	if (q->offload) {
1712		err = htb_destroy_class_offload(sch, cl, last_child, false,
1713						extack);
1714		if (err)
1715			return err;
1716	}
1717
1718	if (last_child) {
1719		struct netdev_queue *dev_queue = sch->dev_queue;
1720
1721		if (q->offload)
1722			dev_queue = htb_offload_get_queue(cl);
1723
1724		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1725					  cl->parent->common.classid,
1726					  NULL);
1727		if (q->offload)
1728			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
1729	}
1730
1731	sch_tree_lock(sch);
1732
1733	if (!cl->level)
1734		qdisc_purge_queue(cl->leaf.q);
1735
1736	/* delete from hash and active; remainder in destroy_class */
1737	qdisc_class_hash_remove(&q->clhash, &cl->common);
1738	if (cl->parent)
1739		cl->parent->children--;
1740
1741	if (cl->prio_activity)
1742		htb_deactivate(q, cl);
1743
1744	if (cl->cmode != HTB_CAN_SEND)
1745		htb_safe_rb_erase(&cl->pq_node,
1746				  &q->hlevel[cl->level].wait_pq);
1747
1748	if (last_child)
1749		htb_parent_to_leaf(sch, cl, new_q);
1750
1751	sch_tree_unlock(sch);
1752
1753	htb_destroy_class(sch, cl);
1754	return 0;
1755}
1756
1757static int htb_change_class(struct Qdisc *sch, u32 classid,
1758			    u32 parentid, struct nlattr **tca,
1759			    unsigned long *arg, struct netlink_ext_ack *extack)
1760{
1761	int err = -EINVAL;
1762	struct htb_sched *q = qdisc_priv(sch);
1763	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1764	struct tc_htb_qopt_offload offload_opt;
1765	struct nlattr *opt = tca[TCA_OPTIONS];
1766	struct nlattr *tb[TCA_HTB_MAX + 1];
1767	struct Qdisc *parent_qdisc = NULL;
1768	struct netdev_queue *dev_queue;
1769	struct tc_htb_opt *hopt;
1770	u64 rate64, ceil64;
1771	int warn = 0;
1772
1773	/* extract all subattrs from opt attr */
1774	if (!opt)
1775		goto failure;
1776
1777	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1778					  extack);
1779	if (err < 0)
1780		goto failure;
1781
1782	err = -EINVAL;
1783	if (tb[TCA_HTB_PARMS] == NULL)
1784		goto failure;
1785
1786	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1787
1788	hopt = nla_data(tb[TCA_HTB_PARMS]);
1789	if (!hopt->rate.rate || !hopt->ceil.rate)
1790		goto failure;
1791
1792	if (q->offload) {
1793		/* Options not supported by the offload. */
1794		if (hopt->rate.overhead || hopt->ceil.overhead) {
1795			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
1796			goto failure;
1797		}
1798		if (hopt->rate.mpu || hopt->ceil.mpu) {
1799			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
1800			goto failure;
1801		}
1802	}
1803
 1804	/* Keeping backward compatibility with rate_table based iproute2 tc */
1805	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1806		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1807					      NULL));
1808
1809	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1810		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1811					      NULL));
1812
1813	rate64 = nla_get_u64_default(tb[TCA_HTB_RATE64], 0);
1814	ceil64 = nla_get_u64_default(tb[TCA_HTB_CEIL64], 0);
1815
1816	if (!cl) {		/* new class */
1817		struct net_device *dev = qdisc_dev(sch);
1818		struct Qdisc *new_q, *old_q;
1819		int prio;
1820		struct {
1821			struct nlattr		nla;
1822			struct gnet_estimator	opt;
1823		} est = {
1824			.nla = {
1825				.nla_len	= nla_attr_size(sizeof(est.opt)),
1826				.nla_type	= TCA_RATE,
1827			},
1828			.opt = {
1829				/* 4s interval, 16s averaging constant */
1830				.interval	= 2,
1831				.ewma_log	= 2,
1832			},
1833		};
1834
1835		/* check for valid classid */
1836		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1837		    htb_find(classid, sch))
1838			goto failure;
1839
1840		/* check maximal depth */
1841		if (parent && parent->parent && parent->parent->level < 2) {
1842			NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
1843			goto failure;
1844		}
1845		err = -ENOBUFS;
1846		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1847		if (!cl)
1848			goto failure;
1849
1850		gnet_stats_basic_sync_init(&cl->bstats);
1851		gnet_stats_basic_sync_init(&cl->bstats_bias);
1852
1853		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1854		if (err) {
1855			kfree(cl);
1856			goto failure;
1857		}
1858		if (htb_rate_est || tca[TCA_RATE]) {
1859			err = gen_new_estimator(&cl->bstats, NULL,
1860						&cl->rate_est,
1861						NULL,
1862						true,
1863						tca[TCA_RATE] ? : &est.nla);
1864			if (err)
1865				goto err_block_put;
1866		}
1867
1868		cl->children = 0;
1869		RB_CLEAR_NODE(&cl->pq_node);
1870
1871		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1872			RB_CLEAR_NODE(&cl->node[prio]);
1873
1874		cl->common.classid = classid;
1875
1876		/* Make sure nothing interrupts us between two
1877		 * ndo_setup_tc calls.
1878		 */
1879		ASSERT_RTNL();
1880
1881		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1882		 * so it can't be used inside sch_tree_lock
1883		 * -- thanks to Karlis Peisenieks
1884		 */
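		/* Three ways to pick dev_queue: without offload, use the
		 * qdisc's own queue; with offload, a class whose parent is
		 * missing or is an inner node gets a freshly allocated TX
		 * queue (TC_HTB_LEAF_ALLOC_QUEUE); the first child of a leaf
		 * reuses the parent's queue and converts the parent via
		 * TC_HTB_LEAF_TO_INNER.
		 */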
1885		if (!q->offload) {
1886			dev_queue = sch->dev_queue;
1887		} else if (!(parent && !parent->level)) {
1888			/* Assign a dev_queue to this classid. */
1889			offload_opt = (struct tc_htb_qopt_offload) {
1890				.command = TC_HTB_LEAF_ALLOC_QUEUE,
1891				.classid = cl->common.classid,
1892				.parent_classid = parent ?
1893					TC_H_MIN(parent->common.classid) :
1894					TC_HTB_CLASSID_ROOT,
1895				.rate = max_t(u64, hopt->rate.rate, rate64),
1896				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
1897				.prio = hopt->prio,
1898				.quantum = hopt->quantum,
1899				.extack = extack,
1900			};
1901			err = htb_offload(dev, &offload_opt);
1902			if (err) {
1903				NL_SET_ERR_MSG_WEAK(extack,
1904						    "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
1905				goto err_kill_estimator;
1906			}
1907			dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1908		} else { /* First child. */
1909			dev_queue = htb_offload_get_queue(parent);
1910			old_q = htb_graft_helper(dev_queue, NULL);
1911			WARN_ON(old_q != parent->leaf.q);
1912			offload_opt = (struct tc_htb_qopt_offload) {
1913				.command = TC_HTB_LEAF_TO_INNER,
1914				.classid = cl->common.classid,
1915				.parent_classid =
1916					TC_H_MIN(parent->common.classid),
1917				.rate = max_t(u64, hopt->rate.rate, rate64),
1918				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
1919				.prio = hopt->prio,
1920				.quantum = hopt->quantum,
1921				.extack = extack,
1922			};
1923			err = htb_offload(dev, &offload_opt);
1924			if (err) {
1925				NL_SET_ERR_MSG_WEAK(extack,
1926						    "Failed to offload TC_HTB_LEAF_TO_INNER");
1927				htb_graft_helper(dev_queue, old_q);
1928				goto err_kill_estimator;
1929			}
1930			_bstats_update(&parent->bstats_bias,
1931				       u64_stats_read(&old_q->bstats.bytes),
1932				       u64_stats_read(&old_q->bstats.packets));
1933			qdisc_put(old_q);
1934		}
1935		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1936					  classid, NULL);
1937		if (q->offload) {
1938			/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1939			if (new_q)
1940				qdisc_refcount_inc(new_q);
1941			old_q = htb_graft_helper(dev_queue, new_q);
1942			/* No qdisc_put needed. */
1943			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1944		}
1945		sch_tree_lock(sch);
1946		if (parent && !parent->level) {
1947			/* turn parent into inner node */
1948			qdisc_purge_queue(parent->leaf.q);
1949			parent_qdisc = parent->leaf.q;
1950			if (parent->prio_activity)
1951				htb_deactivate(q, parent);
1952
1953			/* remove from evt list because of level change */
1954			if (parent->cmode != HTB_CAN_SEND) {
1955				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1956				parent->cmode = HTB_CAN_SEND;
1957			}
1958			parent->level = (parent->parent ? parent->parent->level
1959					 : TC_HTB_MAXDEPTH) - 1;
1960			memset(&parent->inner, 0, sizeof(parent->inner));
1961		}
1962
1963		/* leaf (we) needs elementary qdisc */
1964		cl->leaf.q = new_q ? new_q : &noop_qdisc;
1965		if (q->offload)
1966			cl->leaf.offload_queue = dev_queue;
1967
1968		cl->parent = parent;
1969
1970		/* set class to be in HTB_CAN_SEND state */
1971		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1972		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1973		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1974		cl->t_c = ktime_get_ns();
1975		cl->cmode = HTB_CAN_SEND;
1976
1977		/* attach to the hash list and parent's family */
1978		qdisc_class_hash_insert(&q->clhash, &cl->common);
1979		if (parent)
1980			parent->children++;
1981		if (cl->leaf.q != &noop_qdisc)
1982			qdisc_hash_add(cl->leaf.q, true);
1983	} else {
1984		if (tca[TCA_RATE]) {
1985			err = gen_replace_estimator(&cl->bstats, NULL,
1986						    &cl->rate_est,
1987						    NULL,
1988						    true,
1989						    tca[TCA_RATE]);
1990			if (err)
1991				return err;
1992		}
1993
1994		if (q->offload) {
1995			struct net_device *dev = qdisc_dev(sch);
1996
1997			offload_opt = (struct tc_htb_qopt_offload) {
1998				.command = TC_HTB_NODE_MODIFY,
1999				.classid = cl->common.classid,
2000				.rate = max_t(u64, hopt->rate.rate, rate64),
2001				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
2002				.prio = hopt->prio,
2003				.quantum = hopt->quantum,
2004				.extack = extack,
2005			};
2006			err = htb_offload(dev, &offload_opt);
2007			if (err)
2008				/* Estimator was replaced, and rollback may fail
2009				 * as well, so we don't try to recover it, and
2010				 * the estimator won't work properly with the
2011				 * offload anyway, because bstats are updated
2012				 * only when the stats are queried.
2013				 */
2014				return err;
2015		}
2016
2017		sch_tree_lock(sch);
2018	}
2019
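	/* From here on, the new-class and existing-class paths share the same
	 * code under sch_tree_lock(): precompute rate/ceil and, for leaves,
	 * derive the DRR quantum.
	 */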
2020	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
2021	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
2022
2023	/* there used to be a nasty bug here: we have to check that the node
2024	 * is really a leaf before changing cl->leaf!
2025	 */
2026	if (!cl->level) {
2027		u64 quantum = cl->rate.rate_bytes_ps;
2028
2029		do_div(quantum, q->rate2quantum);
2030		cl->quantum = min_t(u64, quantum, INT_MAX);
2031
2032		if (!hopt->quantum && cl->quantum < 1000) {
2033			warn = -1;
2034			cl->quantum = 1000;
2035		}
2036		if (!hopt->quantum && cl->quantum > 200000) {
2037			warn = 1;
2038			cl->quantum = 200000;
2039		}
2040		if (hopt->quantum)
2041			cl->quantum = hopt->quantum;
2042		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2043			cl->prio = TC_HTB_NUMPRIO - 1;
2044	}
2045
2046	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
2047	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
2048
2049	sch_tree_unlock(sch);
2050	qdisc_put(parent_qdisc);
2051
2052	if (warn)
2053		NL_SET_ERR_MSG_FMT_MOD(extack,
2054				       "quantum of class %X is %s. Consider r2q change.",
2055				       cl->common.classid, (warn == -1 ? "small" : "big"));
2056
2057	qdisc_class_hash_grow(sch, &q->clhash);
2058
2059	*arg = (unsigned long)cl;
2060	return 0;
2061
2062err_kill_estimator:
2063	gen_kill_estimator(&cl->rate_est);
2064err_block_put:
2065	tcf_block_put(cl->block);
2066	kfree(cl);
2067failure:
2068	return err;
2069}
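/* A minimal, self-contained sketch of the default-quantum rule applied in
 * htb_change_class() above, for illustration only: the helper name and the
 * 100 Mbit/s figure are hypothetical, and iproute2's usual r2q default of 10
 * is assumed.  The kernel computes the same value inline when the user does
 * not supply an explicit quantum.
 */
static u32 htb_default_quantum_sketch(u64 rate_bytes_ps, u32 r2q)
{
	u64 quantum = rate_bytes_ps;

	do_div(quantum, r2q);		/* quantum = rate / r2q */
	if (quantum < 1000)		/* "is small" warning, clamp up */
		quantum = 1000;
	if (quantum > 200000)		/* "is big" warning, clamp down */
		quantum = 200000;
	return quantum;
}
/* Example: 100 Mbit/s is 12500000 bytes/s; with r2q = 10 the raw quantum is
 * 1250000, which is clamped to 200000 and produces the "Consider r2q change"
 * message emitted above.
 */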
2070
2071static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2072				       struct netlink_ext_ack *extack)
2073{
2074	struct htb_sched *q = qdisc_priv(sch);
2075	struct htb_class *cl = (struct htb_class *)arg;
2076
2077	return cl ? cl->block : q->block;
2078}
2079
2080static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
2081				     u32 classid)
2082{
2083	struct htb_class *cl = htb_find(classid, sch);
2084
2085	/*if (cl && !cl->level) return 0;
2086	 * The line above used to be there to prevent attaching filters to
2087	 * leaves. But at least tc_index filter uses this just to get class
2088	 * for other reasons, so we have to allow it.
2089	 * ----
2090	 * 19.6.2002 As Werner explained it is ok - bind filter is just
2091	 * another way to "lock" the class - unlike "get" this lock can
2092	 * be broken by class during destroy IIUC.
2093	 */
2094	if (cl)
2095		qdisc_class_get(&cl->common);
2096	return (unsigned long)cl;
2097}
2098
2099static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2100{
2101	struct htb_class *cl = (struct htb_class *)arg;
2102
2103	qdisc_class_put(&cl->common);
2104}
2105
2106static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2107{
2108	struct htb_sched *q = qdisc_priv(sch);
2109	struct htb_class *cl;
2110	unsigned int i;
2111
2112	if (arg->stop)
2113		return;
2114
2115	for (i = 0; i < q->clhash.hashsize; i++) {
2116		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
2117			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
2118				return;
2119		}
2120	}
2121}
2122
2123static const struct Qdisc_class_ops htb_class_ops = {
2124	.select_queue	=	htb_select_queue,
2125	.graft		=	htb_graft,
2126	.leaf		=	htb_leaf,
2127	.qlen_notify	=	htb_qlen_notify,
2128	.find		=	htb_search,
2129	.change		=	htb_change_class,
2130	.delete		=	htb_delete,
2131	.walk		=	htb_walk,
2132	.tcf_block	=	htb_tcf_block,
2133	.bind_tcf	=	htb_bind_filter,
2134	.unbind_tcf	=	htb_unbind_filter,
2135	.dump		=	htb_dump_class,
2136	.dump_stats	=	htb_dump_class_stats,
2137};
2138
2139static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
2140	.cl_ops		=	&htb_class_ops,
2141	.id		=	"htb",
2142	.priv_size	=	sizeof(struct htb_sched),
2143	.enqueue	=	htb_enqueue,
2144	.dequeue	=	htb_dequeue,
2145	.peek		=	qdisc_peek_dequeued,
2146	.init		=	htb_init,
2147	.attach		=	htb_attach,
2148	.reset		=	htb_reset,
2149	.destroy	=	htb_destroy,
2150	.dump		=	htb_dump,
2151	.owner		=	THIS_MODULE,
2152};
2153MODULE_ALIAS_NET_SCH("htb");
2154
2155static int __init htb_module_init(void)
2156{
2157	return register_qdisc(&htb_qdisc_ops);
2158}
2159static void __exit htb_module_exit(void)
2160{
2161	unregister_qdisc(&htb_qdisc_ops);
2162}
2163
2164module_init(htb_module_init)
2165module_exit(htb_module_exit)
2166MODULE_LICENSE("GPL");
2167MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");
v3.15
 
   1/*
   2 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Martin Devera, <devik@cdi.cz>
  10 *
  11 * Credits (in time order) for older HTB versions:
  12 *              Stef Coene <stef.coene@docum.org>
  13 *			HTB support at LARTC mailing list
  14 *		Ondrej Kraus, <krauso@barr.cz>
  15 *			found missing INIT_QDISC(htb)
  16 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
  17 *			helped a lot to locate nasty class stall bug
  18 *		Andi Kleen, Jamal Hadi, Bert Hubert
  19 *			code review and helpful comments on shaping
  20 *		Tomasz Wrona, <tw@eter.tym.pl>
  21 *			created test case so that I was able to fix nasty bug
  22 *		Wilfried Weissmann
  23 *			spotted bug in dequeue code and helped with fix
  24 *		Jiri Fojtasek
  25 *			fixed requeue routine
  26 *		and many others. thanks.
  27 */
  28#include <linux/module.h>
  29#include <linux/moduleparam.h>
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/string.h>
  33#include <linux/errno.h>
  34#include <linux/skbuff.h>
  35#include <linux/list.h>
  36#include <linux/compiler.h>
  37#include <linux/rbtree.h>
  38#include <linux/workqueue.h>
  39#include <linux/slab.h>
  40#include <net/netlink.h>
  41#include <net/sch_generic.h>
  42#include <net/pkt_sched.h>
 
  43
  44/* HTB algorithm.
  45    Author: devik@cdi.cz
  46    ========================================================================
  47    HTB is like TBF with multiple classes. It is also similar to CBQ because
  48    it allows to assign priority to each class in hierarchy.
  49    In fact it is another implementation of Floyd's formal sharing.
  50
  51    Levels:
  52    Each class is assigned level. Leaf has ALWAYS level 0 and root
  53    classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level
  54    one less than their parent.
  55*/
  56
  57static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
  58#define HTB_VER 0x30011		/* major must be matched with number suplied by TC as version */
  59
  60#if HTB_VER >> 16 != TC_HTB_PROTOVER
  61#error "Mismatched sch_htb.c and pkt_sch.h"
  62#endif
  63
  64/* Module parameter and sysfs export */
  65module_param    (htb_hysteresis, int, 0640);
  66MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
  67
  68static int htb_rate_est = 0; /* htb classes have a default rate estimator */
  69module_param(htb_rate_est, int, 0640);
  70MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
  71
  72/* used internaly to keep status of single class */
  73enum htb_cmode {
  74	HTB_CANT_SEND,		/* class can't send and can't borrow */
  75	HTB_MAY_BORROW,		/* class can't send but may borrow */
  76	HTB_CAN_SEND		/* class can send */
  77};
  78
  79struct htb_prio {
  80	union {
  81		struct rb_root	row;
  82		struct rb_root	feed;
  83	};
  84	struct rb_node	*ptr;
  85	/* When class changes from state 1->2 and disconnects from
  86	 * parent's feed then we lost ptr value and start from the
  87	 * first child again. Here we store classid of the
  88	 * last valid ptr (used when ptr is NULL).
  89	 */
  90	u32		last_ptr_id;
  91};
  92
  93/* interior & leaf nodes; props specific to leaves are marked L:
  94 * To reduce false sharing, place mostly read fields at beginning,
  95 * and mostly written ones at the end.
  96 */
  97struct htb_class {
  98	struct Qdisc_class_common common;
  99	struct psched_ratecfg	rate;
 100	struct psched_ratecfg	ceil;
 101	s64			buffer, cbuffer;/* token bucket depth/rate */
 102	s64			mbuffer;	/* max wait time */
 103	u32			prio;		/* these two are used only by leaves... */
 104	int			quantum;	/* but stored for parent-to-leaf return */
 105
 106	struct tcf_proto	*filter_list;	/* class attached filters */
 107	int			filter_cnt;
 108	int			refcnt;		/* usage count of this class */
 109
 110	int			level;		/* our level (see above) */
 111	unsigned int		children;
 112	struct htb_class	*parent;	/* parent class */
 113
 114	struct gnet_stats_rate_est64 rate_est;
 115
 116	/*
 117	 * Written often fields
 118	 */
 119	struct gnet_stats_basic_packed bstats;
 120	struct gnet_stats_queue	qstats;
 121	struct tc_htb_xstats	xstats;	/* our special stats */
 122
 123	/* token bucket parameters */
 124	s64			tokens, ctokens;/* current number of tokens */
 125	s64			t_c;		/* checkpoint time */
 126
 127	union {
 128		struct htb_class_leaf {
 129			struct list_head drop_list;
 130			int		deficit[TC_HTB_MAXDEPTH];
 131			struct Qdisc	*q;
 
 132		} leaf;
 133		struct htb_class_inner {
 134			struct htb_prio clprio[TC_HTB_NUMPRIO];
 135		} inner;
 136	} un;
 137	s64			pq_key;
 138
 139	int			prio_activity;	/* for which prios are we active */
 140	enum htb_cmode		cmode;		/* current mode of the class */
 141	struct rb_node		pq_node;	/* node for event queue */
 142	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 
 
 
 143};
 144
 145struct htb_level {
 146	struct rb_root	wait_pq;
 147	struct htb_prio hprio[TC_HTB_NUMPRIO];
 148};
 149
 150struct htb_sched {
 151	struct Qdisc_class_hash clhash;
 152	int			defcls;		/* class where unclassified flows go to */
 153	int			rate2quantum;	/* quant = rate / rate2quantum */
 154
 155	/* filters for qdisc itself */
 156	struct tcf_proto	*filter_list;
 
 157
 158#define HTB_WARN_TOOMANYEVENTS	0x1
 159	unsigned int		warned;	/* only one warning */
 160	int			direct_qlen;
 161	struct work_struct	work;
 162
 163	/* non shaped skbs; let them go directly thru */
 164	struct sk_buff_head	direct_queue;
 165	long			direct_pkts;
 
 166
 167	struct qdisc_watchdog	watchdog;
 168
 169	s64			now;	/* cached dequeue time */
 170	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 171
 172	/* time of nearest event per level (row) */
 173	s64			near_ev_cache[TC_HTB_MAXDEPTH];
 174
 175	int			row_mask[TC_HTB_MAXDEPTH];
 176
 177	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
 
 
 
 
 
 178};
 179
 180/* find class in global hash table using given handle */
 181static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 182{
 183	struct htb_sched *q = qdisc_priv(sch);
 184	struct Qdisc_class_common *clc;
 185
 186	clc = qdisc_class_find(&q->clhash, handle);
 187	if (clc == NULL)
 188		return NULL;
 189	return container_of(clc, struct htb_class, common);
 190}
 191
 
 
 
 
 
 
 
 192/**
 193 * htb_classify - classify a packet into class
 
 
 
 194 *
 195 * It returns NULL if the packet should be dropped or -1 if the packet
 196 * should be passed directly thru. In all other cases leaf class is returned.
 197 * We allow direct class selection by classid in priority. The we examine
 198 * filters in qdisc and in inner nodes (if higher filter points to the inner
 199 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 200 * internal fifo (direct). These packets then go directly thru. If we still
 201 * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful
 202 * then finish and return direct queue.
 203 */
 204#define HTB_DIRECT ((struct htb_class *)-1L)
 205
 206static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 207				      int *qerr)
 208{
 209	struct htb_sched *q = qdisc_priv(sch);
 210	struct htb_class *cl;
 211	struct tcf_result res;
 212	struct tcf_proto *tcf;
 213	int result;
 214
 215	/* allow to select class by setting skb->priority to valid classid;
 216	 * note that nfmark can be used too by attaching filter fw with no
 217	 * rules in it
 218	 */
 219	if (skb->priority == sch->handle)
 220		return HTB_DIRECT;	/* X:0 (direct flow) selected */
 221	cl = htb_find(skb->priority, sch);
 222	if (cl) {
 223		if (cl->level == 0)
 224			return cl;
 225		/* Start with inner filter chain if a non-leaf class is selected */
 226		tcf = cl->filter_list;
 227	} else {
 228		tcf = q->filter_list;
 229	}
 230
 231	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 232	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 233#ifdef CONFIG_NET_CLS_ACT
 234		switch (result) {
 235		case TC_ACT_QUEUED:
 236		case TC_ACT_STOLEN:
 
 237			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 238		case TC_ACT_SHOT:
 239			return NULL;
 240		}
 241#endif
 242		cl = (void *)res.class;
 243		if (!cl) {
 244			if (res.classid == sch->handle)
 245				return HTB_DIRECT;	/* X:0 (direct flow) */
 246			cl = htb_find(res.classid, sch);
 247			if (!cl)
 248				break;	/* filter selected invalid classid */
 249		}
 250		if (!cl->level)
 251			return cl;	/* we hit leaf; return it */
 252
 253		/* we have got inner class; apply inner filter chain */
 254		tcf = cl->filter_list;
 255	}
 256	/* classification failed; try to use default class */
 257	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
 258	if (!cl || cl->level)
 259		return HTB_DIRECT;	/* bad default .. this is safe bet */
 260	return cl;
 261}
 262
 263/**
 264 * htb_add_to_id_tree - adds class to the round robin list
 
 
 
 265 *
 266 * Routine adds class to the list (actually tree) sorted by classid.
 267 * Make sure that class is not already on such list for given prio.
 268 */
 269static void htb_add_to_id_tree(struct rb_root *root,
 270			       struct htb_class *cl, int prio)
 271{
 272	struct rb_node **p = &root->rb_node, *parent = NULL;
 273
 274	while (*p) {
 275		struct htb_class *c;
 276		parent = *p;
 277		c = rb_entry(parent, struct htb_class, node[prio]);
 278
 279		if (cl->common.classid > c->common.classid)
 280			p = &parent->rb_right;
 281		else
 282			p = &parent->rb_left;
 283	}
 284	rb_link_node(&cl->node[prio], parent, p);
 285	rb_insert_color(&cl->node[prio], root);
 286}
 287
 288/**
 289 * htb_add_to_wait_tree - adds class to the event queue with delay
 
 
 
 290 *
 291 * The class is added to priority event queue to indicate that class will
 292 * change its mode in cl->pq_key microseconds. Make sure that class is not
 293 * already in the queue.
 294 */
 295static void htb_add_to_wait_tree(struct htb_sched *q,
 296				 struct htb_class *cl, s64 delay)
 297{
 298	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
 299
 300	cl->pq_key = q->now + delay;
 301	if (cl->pq_key == q->now)
 302		cl->pq_key++;
 303
 304	/* update the nearest event cache */
 305	if (q->near_ev_cache[cl->level] > cl->pq_key)
 306		q->near_ev_cache[cl->level] = cl->pq_key;
 307
 308	while (*p) {
 309		struct htb_class *c;
 310		parent = *p;
 311		c = rb_entry(parent, struct htb_class, pq_node);
 312		if (cl->pq_key >= c->pq_key)
 313			p = &parent->rb_right;
 314		else
 315			p = &parent->rb_left;
 316	}
 317	rb_link_node(&cl->pq_node, parent, p);
 318	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 319}
 320
 321/**
 322 * htb_next_rb_node - finds next node in binary tree
 
 323 *
 324 * When we are past last key we return NULL.
 325 * Average complexity is 2 steps per call.
 326 */
 327static inline void htb_next_rb_node(struct rb_node **n)
 328{
 329	*n = rb_next(*n);
 330}
 331
 332/**
 333 * htb_add_class_to_row - add class to its row
 
 
 
 334 *
 335 * The class is added to row at priorities marked in mask.
 336 * It does nothing if mask == 0.
 337 */
 338static inline void htb_add_class_to_row(struct htb_sched *q,
 339					struct htb_class *cl, int mask)
 340{
 341	q->row_mask[cl->level] |= mask;
 342	while (mask) {
 343		int prio = ffz(~mask);
 344		mask &= ~(1 << prio);
 345		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
 346	}
 347}
 348
 349/* If this triggers, it is a bug in this code, but it need not be fatal */
 350static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
 351{
 352	if (RB_EMPTY_NODE(rb)) {
 353		WARN_ON(1);
 354	} else {
 355		rb_erase(rb, root);
 356		RB_CLEAR_NODE(rb);
 357	}
 358}
 359
 360
 361/**
 362 * htb_remove_class_from_row - removes class from its row
 
 
 
 363 *
 364 * The class is removed from row at priorities marked in mask.
 365 * It does nothing if mask == 0.
 366 */
 367static inline void htb_remove_class_from_row(struct htb_sched *q,
 368						 struct htb_class *cl, int mask)
 369{
 370	int m = 0;
 371	struct htb_level *hlevel = &q->hlevel[cl->level];
 372
 373	while (mask) {
 374		int prio = ffz(~mask);
 375		struct htb_prio *hprio = &hlevel->hprio[prio];
 376
 377		mask &= ~(1 << prio);
 378		if (hprio->ptr == cl->node + prio)
 379			htb_next_rb_node(&hprio->ptr);
 380
 381		htb_safe_rb_erase(cl->node + prio, &hprio->row);
 382		if (!hprio->row.rb_node)
 383			m |= 1 << prio;
 384	}
 385	q->row_mask[cl->level] &= ~m;
 386}
 387
 388/**
 389 * htb_activate_prios - creates active classe's feed chain
 
 
 390 *
 391 * The class is connected to ancestors and/or appropriate rows
 392 * for priorities it is participating on. cl->cmode must be new
 393 * (activated) mode. It does nothing if cl->prio_activity == 0.
 394 */
 395static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 396{
 397	struct htb_class *p = cl->parent;
 398	long m, mask = cl->prio_activity;
 399
 400	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 401		m = mask;
 402		while (m) {
 403			int prio = ffz(~m);
 
 
 
 404			m &= ~(1 << prio);
 405
 406			if (p->un.inner.clprio[prio].feed.rb_node)
 407				/* parent already has its feed in use so that
 408				 * reset bit in mask as parent is already ok
 409				 */
 410				mask &= ~(1 << prio);
 411
 412			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
 413		}
 414		p->prio_activity |= mask;
 415		cl = p;
 416		p = cl->parent;
 417
 418	}
 419	if (cl->cmode == HTB_CAN_SEND && mask)
 420		htb_add_class_to_row(q, cl, mask);
 421}
 422
 423/**
 424 * htb_deactivate_prios - remove class from feed chain
 
 
 425 *
 426 * cl->cmode must represent old mode (before deactivation). It does
 427 * nothing if cl->prio_activity == 0. Class is removed from all feed
 428 * chains and rows.
 429 */
 430static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 431{
 432	struct htb_class *p = cl->parent;
 433	long m, mask = cl->prio_activity;
 434
 435	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 436		m = mask;
 437		mask = 0;
 438		while (m) {
 439			int prio = ffz(~m);
 440			m &= ~(1 << prio);
 441
 442			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
 443				/* we are removing child which is pointed to from
 444				 * parent feed - forget the pointer but remember
 445				 * classid
 446				 */
 447				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
 448				p->un.inner.clprio[prio].ptr = NULL;
 449			}
 450
 451			htb_safe_rb_erase(cl->node + prio,
 452					  &p->un.inner.clprio[prio].feed);
 453
 454			if (!p->un.inner.clprio[prio].feed.rb_node)
 455				mask |= 1 << prio;
 456		}
 457
 458		p->prio_activity &= ~mask;
 459		cl = p;
 460		p = cl->parent;
 461
 462	}
 463	if (cl->cmode == HTB_CAN_SEND && mask)
 464		htb_remove_class_from_row(q, cl, mask);
 465}
 466
 467static inline s64 htb_lowater(const struct htb_class *cl)
 468{
 469	if (htb_hysteresis)
 470		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
 471	else
 472		return 0;
 473}
 474static inline s64 htb_hiwater(const struct htb_class *cl)
 475{
 476	if (htb_hysteresis)
 477		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
 478	else
 479		return 0;
 480}
 481
 482
 483/**
 484 * htb_class_mode - computes and returns current class mode
 
 
 485 *
 486 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 487 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 488 * from now to time when cl will change its state.
 489 * Also it is worth to note that class mode doesn't change simply
 490 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 491 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 492 * mode transitions per time unit. The speed gain is about 1/6.
 493 */
 494static inline enum htb_cmode
 495htb_class_mode(struct htb_class *cl, s64 *diff)
 496{
 497	s64 toks;
 498
 499	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
 500		*diff = -toks;
 501		return HTB_CANT_SEND;
 502	}
 503
 504	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
 505		return HTB_CAN_SEND;
 506
 507	*diff = -toks;
 508	return HTB_MAY_BORROW;
 509}
 510
 511/**
 512 * htb_change_class_mode - changes classe's mode
 
 
 
 513 *
 514 * This should be the only way how to change classe's mode under normal
 515 * cirsumstances. Routine will update feed lists linkage, change mode
 516 * and add class to the wait event queue if appropriate. New mode should
 517 * be different from old one and cl->pq_key has to be valid if changing
 518 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 519 */
 520static void
 521htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
 522{
 523	enum htb_cmode new_mode = htb_class_mode(cl, diff);
 524
 525	if (new_mode == cl->cmode)
 526		return;
 527
 
 
 
 
 
 528	if (cl->prio_activity) {	/* not necessary: speed optimization */
 529		if (cl->cmode != HTB_CANT_SEND)
 530			htb_deactivate_prios(q, cl);
 531		cl->cmode = new_mode;
 532		if (new_mode != HTB_CANT_SEND)
 533			htb_activate_prios(q, cl);
 534	} else
 535		cl->cmode = new_mode;
 536}
 537
 538/**
 539 * htb_activate - inserts leaf cl into appropriate active feeds
 
 
 540 *
 541 * Routine learns (new) priority of leaf and activates feed chain
 542 * for the prio. It can be called on already active leaf safely.
 543 * It also adds leaf into droplist.
 544 */
 545static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 546{
 547	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 548
 549	if (!cl->prio_activity) {
 550		cl->prio_activity = 1 << cl->prio;
 551		htb_activate_prios(q, cl);
 552		list_add_tail(&cl->un.leaf.drop_list,
 553			      q->drops + cl->prio);
 554	}
 555}
 556
 557/**
 558 * htb_deactivate - remove leaf cl from active feeds
 
 
 559 *
 560 * Make sure that leaf is active. In the other words it can't be called
 561 * with non-active leaf. It also removes class from the drop list.
 562 */
 563static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 564{
 565	WARN_ON(!cl->prio_activity);
 566
 567	htb_deactivate_prios(q, cl);
 568	cl->prio_activity = 0;
 569	list_del_init(&cl->un.leaf.drop_list);
 570}
 571
 572static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 573{
 574	int uninitialized_var(ret);
 
 575	struct htb_sched *q = qdisc_priv(sch);
 576	struct htb_class *cl = htb_classify(skb, sch, &ret);
 577
 578	if (cl == HTB_DIRECT) {
 579		/* enqueue to helper queue */
 580		if (q->direct_queue.qlen < q->direct_qlen) {
 581			__skb_queue_tail(&q->direct_queue, skb);
 582			q->direct_pkts++;
 583		} else {
 584			return qdisc_drop(skb, sch);
 585		}
 586#ifdef CONFIG_NET_CLS_ACT
 587	} else if (!cl) {
 588		if (ret & __NET_XMIT_BYPASS)
 589			sch->qstats.drops++;
 590		kfree_skb(skb);
 591		return ret;
 592#endif
 593	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
 
 594		if (net_xmit_drop_count(ret)) {
 595			sch->qstats.drops++;
 596			cl->qstats.drops++;
 597		}
 598		return ret;
 599	} else {
 600		htb_activate(q, cl);
 601	}
 602
 
 603	sch->q.qlen++;
 604	return NET_XMIT_SUCCESS;
 605}
 606
 607static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 608{
 609	s64 toks = diff + cl->tokens;
 610
 611	if (toks > cl->buffer)
 612		toks = cl->buffer;
 613	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
 614	if (toks <= -cl->mbuffer)
 615		toks = 1 - cl->mbuffer;
 616
 617	cl->tokens = toks;
 618}
 619
 620static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 621{
 622	s64 toks = diff + cl->ctokens;
 623
 624	if (toks > cl->cbuffer)
 625		toks = cl->cbuffer;
 626	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
 627	if (toks <= -cl->mbuffer)
 628		toks = 1 - cl->mbuffer;
 629
 630	cl->ctokens = toks;
 631}
 632
 633/**
 634 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 
 
 
 
 635 *
 636 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 637 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 638 * leaf and all ancestors and to rate bucket for ancestors at levels
 639 * "level" and higher. It also handles possible change of mode resulting
 640 * from the update. Note that mode can also increase here (MAY_BORROW to
 641 * CAN_SEND) because we can use more precise clock that event queue here.
 642 * In such case we remove class from event queue first.
 643 */
 644static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 645			     int level, struct sk_buff *skb)
 646{
 647	int bytes = qdisc_pkt_len(skb);
 648	enum htb_cmode old_mode;
 649	s64 diff;
 650
 651	while (cl) {
 652		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 653		if (cl->level >= level) {
 654			if (cl->level == level)
 655				cl->xstats.lends++;
 656			htb_accnt_tokens(cl, bytes, diff);
 657		} else {
 658			cl->xstats.borrows++;
 659			cl->tokens += diff;	/* we moved t_c; update tokens */
 660		}
 661		htb_accnt_ctokens(cl, bytes, diff);
 662		cl->t_c = q->now;
 663
 664		old_mode = cl->cmode;
 665		diff = 0;
 666		htb_change_class_mode(q, cl, &diff);
 667		if (old_mode != cl->cmode) {
 668			if (old_mode != HTB_CAN_SEND)
 669				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 670			if (cl->cmode != HTB_CAN_SEND)
 671				htb_add_to_wait_tree(q, cl, diff);
 672		}
 673
 674		/* update basic stats except for leaves which are already updated */
 675		if (cl->level)
 676			bstats_update(&cl->bstats, skb);
 677
 678		cl = cl->parent;
 679	}
 680}
 681
 682/**
 683 * htb_do_events - make mode changes to classes at the level
 
 
 
 684 *
 685 * Scans event queue for pending events and applies them. Returns time of
 686 * next pending event (0 for no event in pq, q->now for too many events).
 687 * Note: Applied are events whose have cl->pq_key <= q->now.
 688 */
 689static s64 htb_do_events(struct htb_sched *q, const int level,
 690			 unsigned long start)
 691{
 692	/* don't run for longer than 2 jiffies; 2 is used instead of
 693	 * 1 to simplify things when jiffy is going to be incremented
 694	 * too soon
 695	 */
 696	unsigned long stop_at = start + 2;
 697	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
 698
 699	while (time_before(jiffies, stop_at)) {
 700		struct htb_class *cl;
 701		s64 diff;
 702		struct rb_node *p = rb_first(wait_pq);
 703
 704		if (!p)
 705			return 0;
 706
 707		cl = rb_entry(p, struct htb_class, pq_node);
 708		if (cl->pq_key > q->now)
 709			return cl->pq_key;
 710
 711		htb_safe_rb_erase(p, wait_pq);
 712		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
 713		htb_change_class_mode(q, cl, &diff);
 714		if (cl->cmode != HTB_CAN_SEND)
 715			htb_add_to_wait_tree(q, cl, diff);
 716	}
 717
 718	/* too much load - let's continue after a break for scheduling */
 719	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
 720		pr_warn("htb: too many events!\n");
 721		q->warned |= HTB_WARN_TOOMANYEVENTS;
 722	}
 723
 724	return q->now;
 725}
 726
 727/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
 728 * is no such one exists.
 729 */
 730static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 731					      u32 id)
 732{
 733	struct rb_node *r = NULL;
 734	while (n) {
 735		struct htb_class *cl =
 736		    rb_entry(n, struct htb_class, node[prio]);
 737
 738		if (id > cl->common.classid) {
 739			n = n->rb_right;
 740		} else if (id < cl->common.classid) {
 741			r = n;
 742			n = n->rb_left;
 743		} else {
 744			return n;
 745		}
 746	}
 747	return r;
 748}
 749
 750/**
 751 * htb_lookup_leaf - returns next leaf class in DRR order
 
 
 752 *
 753 * Find leaf where current feed pointers points to.
 754 */
 755static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 756{
 757	int i;
 758	struct {
 759		struct rb_node *root;
 760		struct rb_node **pptr;
 761		u32 *pid;
 762	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 763
 764	BUG_ON(!hprio->row.rb_node);
 765	sp->root = hprio->row.rb_node;
 766	sp->pptr = &hprio->ptr;
 767	sp->pid = &hprio->last_ptr_id;
 768
 769	for (i = 0; i < 65535; i++) {
 770		if (!*sp->pptr && *sp->pid) {
 771			/* ptr was invalidated but id is valid - try to recover
 772			 * the original or next ptr
 773			 */
 774			*sp->pptr =
 775			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 776		}
 777		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
 778				 * can become out of date quickly
 779				 */
 780		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 781			*sp->pptr = sp->root;
 782			while ((*sp->pptr)->rb_left)
 783				*sp->pptr = (*sp->pptr)->rb_left;
 784			if (sp > stk) {
 785				sp--;
 786				if (!*sp->pptr) {
 787					WARN_ON(1);
 788					return NULL;
 789				}
 790				htb_next_rb_node(sp->pptr);
 791			}
 792		} else {
 793			struct htb_class *cl;
 794			struct htb_prio *clp;
 795
 796			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 797			if (!cl->level)
 798				return cl;
 799			clp = &cl->un.inner.clprio[prio];
 800			(++sp)->root = clp->feed.rb_node;
 801			sp->pptr = &clp->ptr;
 802			sp->pid = &clp->last_ptr_id;
 803		}
 804	}
 805	WARN_ON(1);
 806	return NULL;
 807}
 808
 809/* dequeues packet at given priority and level; call only if
 810 * you are sure that there is active class at prio/level
 811 */
 812static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
 813					const int level)
 814{
 815	struct sk_buff *skb = NULL;
 816	struct htb_class *cl, *start;
 817	struct htb_level *hlevel = &q->hlevel[level];
 818	struct htb_prio *hprio = &hlevel->hprio[prio];
 819
 820	/* look initial class up in the row */
 821	start = cl = htb_lookup_leaf(hprio, prio);
 822
 823	do {
 824next:
 825		if (unlikely(!cl))
 826			return NULL;
 827
 828		/* class can be empty - it is unlikely but can be true if leaf
 829		 * qdisc drops packets in enqueue routine or if someone used
 830		 * graft operation on the leaf since last dequeue;
 831		 * simply deactivate and skip such class
 832		 */
 833		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
 834			struct htb_class *next;
 835			htb_deactivate(q, cl);
 836
 837			/* row/level might become empty */
 838			if ((q->row_mask[level] & (1 << prio)) == 0)
 839				return NULL;
 840
 841			next = htb_lookup_leaf(hprio, prio);
 842
 843			if (cl == start)	/* fix start if we just deleted it */
 844				start = next;
 845			cl = next;
 846			goto next;
 847		}
 848
 849		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
 850		if (likely(skb != NULL))
 851			break;
 852
 853		qdisc_warn_nonwc("htb", cl->un.leaf.q);
 854		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
 855					 &q->hlevel[0].hprio[prio].ptr);
 856		cl = htb_lookup_leaf(hprio, prio);
 857
 858	} while (cl != start);
 859
 860	if (likely(skb != NULL)) {
 861		bstats_update(&cl->bstats, skb);
 862		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
 863		if (cl->un.leaf.deficit[level] < 0) {
 864			cl->un.leaf.deficit[level] += cl->quantum;
 865			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
 866						 &q->hlevel[0].hprio[prio].ptr);
 867		}
 868		/* this used to be after charge_class but this constelation
 869		 * gives us slightly better performance
 870		 */
 871		if (!cl->un.leaf.q->q.qlen)
 872			htb_deactivate(q, cl);
 873		htb_charge_class(q, cl, level, skb);
 874	}
 875	return skb;
 876}
 877
 878static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 879{
 880	struct sk_buff *skb;
 881	struct htb_sched *q = qdisc_priv(sch);
 882	int level;
 883	s64 next_event;
 884	unsigned long start_at;
 885
 886	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 887	skb = __skb_dequeue(&q->direct_queue);
 888	if (skb != NULL) {
 889ok:
 890		qdisc_bstats_update(sch, skb);
 891		qdisc_unthrottled(sch);
 892		sch->q.qlen--;
 893		return skb;
 894	}
 895
 896	if (!sch->q.qlen)
 897		goto fin;
 898	q->now = ktime_to_ns(ktime_get());
 899	start_at = jiffies;
 900
 901	next_event = q->now + 5LLU * NSEC_PER_SEC;
 902
 903	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 904		/* common case optimization - skip event handler quickly */
 905		int m;
 906		s64 event = q->near_ev_cache[level];
 907
 908		if (q->now >= event) {
 909			event = htb_do_events(q, level, start_at);
 910			if (!event)
 911				event = q->now + NSEC_PER_SEC;
 912			q->near_ev_cache[level] = event;
 913		}
 914
 915		if (next_event > event)
 916			next_event = event;
 917
 918		m = ~q->row_mask[level];
 919		while (m != (int)(-1)) {
 920			int prio = ffz(m);
 921
 922			m |= 1 << prio;
 923			skb = htb_dequeue_tree(q, prio, level);
 924			if (likely(skb != NULL))
 925				goto ok;
 926		}
 927	}
 928	sch->qstats.overlimits++;
 929	if (likely(next_event > q->now)) {
 930		if (!test_bit(__QDISC_STATE_DEACTIVATED,
 931			      &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
 932			ktime_t time = ns_to_ktime(next_event);
 933			qdisc_throttled(q->watchdog.qdisc);
 934			hrtimer_start(&q->watchdog.timer, time,
 935				      HRTIMER_MODE_ABS);
 936		}
 937	} else {
 938		schedule_work(&q->work);
 939	}
 940fin:
 941	return skb;
 942}
 943
 944/* try to drop from each class (by prio) until one succeed */
 945static unsigned int htb_drop(struct Qdisc *sch)
 946{
 947	struct htb_sched *q = qdisc_priv(sch);
 948	int prio;
 949
 950	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
 951		struct list_head *p;
 952		list_for_each(p, q->drops + prio) {
 953			struct htb_class *cl = list_entry(p, struct htb_class,
 954							  un.leaf.drop_list);
 955			unsigned int len;
 956			if (cl->un.leaf.q->ops->drop &&
 957			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
 958				sch->q.qlen--;
 959				if (!cl->un.leaf.q->q.qlen)
 960					htb_deactivate(q, cl);
 961				return len;
 962			}
 963		}
 964	}
 965	return 0;
 966}
 967
 968/* reset all classes */
 969/* always caled under BH & queue lock */
 970static void htb_reset(struct Qdisc *sch)
 971{
 972	struct htb_sched *q = qdisc_priv(sch);
 973	struct htb_class *cl;
 974	unsigned int i;
 975
 976	for (i = 0; i < q->clhash.hashsize; i++) {
 977		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 978			if (cl->level)
 979				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
 980			else {
 981				if (cl->un.leaf.q)
 982					qdisc_reset(cl->un.leaf.q);
 983				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
 984			}
 985			cl->prio_activity = 0;
 986			cl->cmode = HTB_CAN_SEND;
 987
 988		}
 989	}
 990	qdisc_watchdog_cancel(&q->watchdog);
 991	__skb_queue_purge(&q->direct_queue);
 992	sch->q.qlen = 0;
 993	memset(q->hlevel, 0, sizeof(q->hlevel));
 994	memset(q->row_mask, 0, sizeof(q->row_mask));
 995	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 996		INIT_LIST_HEAD(q->drops + i);
 997}
 998
 999static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1000	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
1001	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
1002	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1003	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1004	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
1005	[TCA_HTB_RATE64] = { .type = NLA_U64 },
1006	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
 
1007};
1008
1009static void htb_work_func(struct work_struct *work)
1010{
1011	struct htb_sched *q = container_of(work, struct htb_sched, work);
1012	struct Qdisc *sch = q->watchdog.qdisc;
1013
 
1014	__netif_schedule(qdisc_root(sch));
 
 
 
 
 
 
1015}
1016
1017static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
1018{
 
 
1019	struct htb_sched *q = qdisc_priv(sch);
1020	struct nlattr *tb[TCA_HTB_MAX + 1];
1021	struct tc_htb_glob *gopt;
 
 
1022	int err;
1023	int i;
 
 
1024
1025	if (!opt)
1026		return -EINVAL;
1027
1028	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
 
 
 
 
 
1029	if (err < 0)
1030		return err;
1031
1032	if (!tb[TCA_HTB_INIT])
1033		return -EINVAL;
1034
1035	gopt = nla_data(tb[TCA_HTB_INIT]);
1036	if (gopt->version != HTB_VER >> 16)
1037		return -EINVAL;
1038
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1039	err = qdisc_class_hash_init(&q->clhash);
1040	if (err < 0)
1041		return err;
1042	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1043		INIT_LIST_HEAD(q->drops + i);
1044
1045	qdisc_watchdog_init(&q->watchdog, sch);
1046	INIT_WORK(&q->work, htb_work_func);
1047	skb_queue_head_init(&q->direct_queue);
1048
1049	if (tb[TCA_HTB_DIRECT_QLEN])
1050		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1051	else {
1052		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1053		if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
1054			q->direct_qlen = 2;
1055	}
1056	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1057		q->rate2quantum = 1;
1058	q->defcls = gopt->defcls;
1059
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1060	return 0;
1061}
1062
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1063static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1064{
1065	struct htb_sched *q = qdisc_priv(sch);
1066	struct nlattr *nest;
1067	struct tc_htb_glob gopt;
1068
 
 
 
 
 
 
1069	/* Its safe to not acquire qdisc lock. As we hold RTNL,
1070	 * no change can happen on the qdisc parameters.
1071	 */
1072
1073	gopt.direct_pkts = q->direct_pkts;
1074	gopt.version = HTB_VER;
1075	gopt.rate2quantum = q->rate2quantum;
1076	gopt.defcls = q->defcls;
1077	gopt.debug = 0;
1078
1079	nest = nla_nest_start(skb, TCA_OPTIONS);
1080	if (nest == NULL)
1081		goto nla_put_failure;
1082	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1083	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1084		goto nla_put_failure;
 
 
1085
1086	return nla_nest_end(skb, nest);
1087
1088nla_put_failure:
1089	nla_nest_cancel(skb, nest);
1090	return -1;
1091}
1092
1093static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1094			  struct sk_buff *skb, struct tcmsg *tcm)
1095{
1096	struct htb_class *cl = (struct htb_class *)arg;
 
1097	struct nlattr *nest;
1098	struct tc_htb_opt opt;
1099
1100	/* Its safe to not acquire qdisc lock. As we hold RTNL,
1101	 * no change can happen on the class parameters.
1102	 */
1103	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1104	tcm->tcm_handle = cl->common.classid;
1105	if (!cl->level && cl->un.leaf.q)
1106		tcm->tcm_info = cl->un.leaf.q->handle;
1107
1108	nest = nla_nest_start(skb, TCA_OPTIONS);
1109	if (nest == NULL)
1110		goto nla_put_failure;
1111
1112	memset(&opt, 0, sizeof(opt));
1113
1114	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1115	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1116	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1117	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1118	opt.quantum = cl->quantum;
1119	opt.prio = cl->prio;
1120	opt.level = cl->level;
1121	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1122		goto nla_put_failure;
 
 
1123	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1124	    nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
 
1125		goto nla_put_failure;
1126	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1127	    nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
 
1128		goto nla_put_failure;
1129
1130	return nla_nest_end(skb, nest);
1131
1132nla_put_failure:
1133	nla_nest_cancel(skb, nest);
1134	return -1;
1135}
1136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1137static int
1138htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1139{
1140	struct htb_class *cl = (struct htb_class *)arg;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1141
1142	if (!cl->level && cl->un.leaf.q)
1143		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1144	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
1145	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
1146
1147	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1148	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
1149	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
1150		return -1;
1151
1152	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1153}
1154
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1155static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1156		     struct Qdisc **old)
1157{
 
1158	struct htb_class *cl = (struct htb_class *)arg;
 
 
1159
1160	if (cl->level)
1161		return -EINVAL;
1162	if (new == NULL &&
1163	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1164				     cl->common.classid)) == NULL)
1165		return -ENOBUFS;
1166
1167	sch_tree_lock(sch);
1168	*old = cl->un.leaf.q;
1169	cl->un.leaf.q = new;
1170	if (*old != NULL) {
1171		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1172		qdisc_reset(*old);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1173	}
1174	sch_tree_unlock(sch);
1175	return 0;
1176}
1177
1178static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1179{
1180	struct htb_class *cl = (struct htb_class *)arg;
1181	return !cl->level ? cl->un.leaf.q : NULL;
1182}
1183
1184static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1185{
1186	struct htb_class *cl = (struct htb_class *)arg;
1187
1188	if (cl->un.leaf.q->q.qlen == 0)
1189		htb_deactivate(qdisc_priv(sch), cl);
1190}
1191
1192static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1193{
1194	struct htb_class *cl = htb_find(classid, sch);
1195	if (cl)
1196		cl->refcnt++;
1197	return (unsigned long)cl;
1198}
1199
1200static inline int htb_parent_last_child(struct htb_class *cl)
1201{
1202	if (!cl->parent)
1203		/* the root class */
1204		return 0;
1205	if (cl->parent->children > 1)
1206		/* not the last child */
1207		return 0;
1208	return 1;
1209}
1210
1211static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1212			       struct Qdisc *new_q)
1213{
 
1214	struct htb_class *parent = cl->parent;
1215
1216	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1217
1218	if (parent->cmode != HTB_CAN_SEND)
1219		htb_safe_rb_erase(&parent->pq_node,
1220				  &q->hlevel[parent->level].wait_pq);
1221
1222	parent->level = 0;
1223	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1224	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1225	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1226	parent->tokens = parent->buffer;
1227	parent->ctokens = parent->cbuffer;
1228	parent->t_c = ktime_to_ns(ktime_get());
1229	parent->cmode = HTB_CAN_SEND;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1230}
1231
1232static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1233{
1234	if (!cl->level) {
1235		WARN_ON(!cl->un.leaf.q);
1236		qdisc_destroy(cl->un.leaf.q);
1237	}
1238	gen_kill_estimator(&cl->bstats, &cl->rate_est);
1239	tcf_destroy_chain(&cl->filter_list);
1240	kfree(cl);
1241}
1242
1243static void htb_destroy(struct Qdisc *sch)
1244{
 
 
1245	struct htb_sched *q = qdisc_priv(sch);
1246	struct hlist_node *next;
 
1247	struct htb_class *cl;
1248	unsigned int i;
1249
1250	cancel_work_sync(&q->work);
1251	qdisc_watchdog_cancel(&q->watchdog);
1252	/* This line used to be after htb_destroy_class call below
1253	 * and surprisingly it worked in 2.4. But it must precede it
1254	 * because filter need its target class alive to be able to call
1255	 * unbind_filter on it (without Oops).
1256	 */
1257	tcf_destroy_chain(&q->filter_list);
1258
1259	for (i = 0; i < q->clhash.hashsize; i++) {
1260		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
1261			tcf_destroy_chain(&cl->filter_list);
 
 
1262	}
1263	for (i = 0; i < q->clhash.hashsize; i++) {
1264		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1265					  common.hnode)
1266			htb_destroy_class(sch, cl);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1267	}
1268	qdisc_class_hash_destroy(&q->clhash);
1269	__skb_queue_purge(&q->direct_queue);
 
 
 
 
1270}
1271
1272static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
1273{
1274	struct htb_sched *q = qdisc_priv(sch);
1275	struct htb_class *cl = (struct htb_class *)arg;
1276	unsigned int qlen;
1277	struct Qdisc *new_q = NULL;
1278	int last_child = 0;
 
1279
1280	/* TODO: why don't allow to delete subtree ? references ? does
1281	 * tc subsys guarantee us that in htb_destroy it holds no class
1282	 * refs so that we can remove children safely there ?
1283	 */
1284	if (cl->children || cl->filter_cnt)
 
1285		return -EBUSY;
 
1286
1287	if (!cl->level && htb_parent_last_child(cl)) {
1288		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1289					  cl->parent->common.classid);
1290		last_child = 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1291	}
1292
1293	sch_tree_lock(sch);
1294
1295	if (!cl->level) {
1296		qlen = cl->un.leaf.q->q.qlen;
1297		qdisc_reset(cl->un.leaf.q);
1298		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
1299	}
1300
1301	/* delete from hash and active; remainder in destroy_class */
1302	qdisc_class_hash_remove(&q->clhash, &cl->common);
1303	if (cl->parent)
1304		cl->parent->children--;
1305
1306	if (cl->prio_activity)
1307		htb_deactivate(q, cl);
1308
1309	if (cl->cmode != HTB_CAN_SEND)
1310		htb_safe_rb_erase(&cl->pq_node,
1311				  &q->hlevel[cl->level].wait_pq);
1312
1313	if (last_child)
1314		htb_parent_to_leaf(q, cl, new_q);
1315
1316	BUG_ON(--cl->refcnt == 0);
1317	/*
1318	 * This shouldn't happen: we "hold" one cops->get() when called
1319	 * from tc_ctl_tclass; the destroy method is done from cops->put().
1320	 */
1321
1322	sch_tree_unlock(sch);
1323	return 0;
1324}
1325
1326static void htb_put(struct Qdisc *sch, unsigned long arg)
1327{
1328	struct htb_class *cl = (struct htb_class *)arg;
1329
1330	if (--cl->refcnt == 0)
1331		htb_destroy_class(sch, cl);
1332}
1333
1334static int htb_change_class(struct Qdisc *sch, u32 classid,
1335			    u32 parentid, struct nlattr **tca,
1336			    unsigned long *arg)
1337{
1338	int err = -EINVAL;
1339	struct htb_sched *q = qdisc_priv(sch);
1340	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 
1341	struct nlattr *opt = tca[TCA_OPTIONS];
1342	struct nlattr *tb[TCA_HTB_MAX + 1];
 
 
1343	struct tc_htb_opt *hopt;
1344	u64 rate64, ceil64;
 
1345
1346	/* extract all subattrs from opt attr */
1347	if (!opt)
1348		goto failure;
1349
1350	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
 
1351	if (err < 0)
1352		goto failure;
1353
1354	err = -EINVAL;
1355	if (tb[TCA_HTB_PARMS] == NULL)
1356		goto failure;
1357
1358	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1359
1360	hopt = nla_data(tb[TCA_HTB_PARMS]);
1361	if (!hopt->rate.rate || !hopt->ceil.rate)
1362		goto failure;
1363
 
 
 
 
 
 
 
 
 
 
 
 
1364	/* Keeping backward compatible with rate_table based iproute2 tc */
1365	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1366		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
 
1367
1368	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1369		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
 
 
 
 
1370
1371	if (!cl) {		/* new class */
1372		struct Qdisc *new_q;
 
1373		int prio;
1374		struct {
1375			struct nlattr		nla;
1376			struct gnet_estimator	opt;
1377		} est = {
1378			.nla = {
1379				.nla_len	= nla_attr_size(sizeof(est.opt)),
1380				.nla_type	= TCA_RATE,
1381			},
1382			.opt = {
1383				/* 4s interval, 16s averaging constant */
1384				.interval	= 2,
1385				.ewma_log	= 2,
1386			},
1387		};
1388
1389		/* check for valid classid */
1390		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1391		    htb_find(classid, sch))
1392			goto failure;
1393
1394		/* check maximal depth */
1395		if (parent && parent->parent && parent->parent->level < 2) {
1396			pr_err("htb: tree is too deep\n");
1397			goto failure;
1398		}
1399		err = -ENOBUFS;
1400		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1401		if (!cl)
1402			goto failure;
1403
 
 
 
 
 
 
 
 
1404		if (htb_rate_est || tca[TCA_RATE]) {
1405			err = gen_new_estimator(&cl->bstats, &cl->rate_est,
1406						qdisc_root_sleeping_lock(sch),
 
 
1407						tca[TCA_RATE] ? : &est.nla);
1408			if (err) {
1409				kfree(cl);
1410				goto failure;
1411			}
1412		}
1413
1414		cl->refcnt = 1;
1415		cl->children = 0;
1416		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1417		RB_CLEAR_NODE(&cl->pq_node);
1418
1419		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1420			RB_CLEAR_NODE(&cl->node[prio]);
1421
 
 
 
 
 
 
 
1422		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1423		 * so that can't be used inside of sch_tree_lock
1424		 * -- thanks to Karlis Peisenieks
1425		 */
1426		new_q = qdisc_create_dflt(sch->dev_queue,
1427					  &pfifo_qdisc_ops, classid);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1428		sch_tree_lock(sch);
1429		if (parent && !parent->level) {
1430			unsigned int qlen = parent->un.leaf.q->q.qlen;
1431
1432			/* turn parent into inner node */
1433			qdisc_reset(parent->un.leaf.q);
1434			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
1435			qdisc_destroy(parent->un.leaf.q);
1436			if (parent->prio_activity)
1437				htb_deactivate(q, parent);
1438
1439			/* remove from evt list because of level change */
1440			if (parent->cmode != HTB_CAN_SEND) {
1441				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1442				parent->cmode = HTB_CAN_SEND;
1443			}
1444			parent->level = (parent->parent ? parent->parent->level
1445					 : TC_HTB_MAXDEPTH) - 1;
1446			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1447		}
 
1448		/* leaf (we) needs elementary qdisc */
1449		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
 
 
1450
1451		cl->common.classid = classid;
1452		cl->parent = parent;
1453
1454		/* set class to be in HTB_CAN_SEND state */
1455		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1456		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1457		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1458		cl->t_c = ktime_to_ns(ktime_get());
1459		cl->cmode = HTB_CAN_SEND;
1460
1461		/* attach to the hash list and parent's family */
1462		qdisc_class_hash_insert(&q->clhash, &cl->common);
1463		if (parent)
1464			parent->children++;
 
 
1465	} else {
1466		if (tca[TCA_RATE]) {
1467			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
1468						    qdisc_root_sleeping_lock(sch),
 
 
1469						    tca[TCA_RATE]);
1470			if (err)
1471				return err;
1472		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1473		sch_tree_lock(sch);
1474	}
1475
1476	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1477
1478	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1479
1480	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1481	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1482
1483	/* There used to be a nasty bug here: we must check that the node
1484	 * really is a leaf before touching cl->un.leaf!
1485	 */
1486	if (!cl->level) {
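		/* quantum is the DRR byte quota a leaf may dequeue in one round
		 * while borrowing; unless the user set it explicitly, derive it
		 * from the class rate divided by r2q and clamp it to a sane
		 * range below.
		 */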
1487		u64 quantum = cl->rate.rate_bytes_ps;
1488
1489		do_div(quantum, q->rate2quantum);
1490		cl->quantum = min_t(u64, quantum, INT_MAX);
1491
1492		if (!hopt->quantum && cl->quantum < 1000) {
1493			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
1494				cl->common.classid);
1495			cl->quantum = 1000;
1496		}
1497		if (!hopt->quantum && cl->quantum > 200000) {
1498			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
1499				cl->common.classid);
1500			cl->quantum = 200000;
1501		}
1502		if (hopt->quantum)
1503			cl->quantum = hopt->quantum;
1504		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1505			cl->prio = TC_HTB_NUMPRIO - 1;
1506	}
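	/* Illustrative example, assuming tc's default r2q of 10: a class added
	 * with "tc class add ... htb rate 500kbit" gets
	 * quantum = 62500 bytes/s / 10 = 6250 bytes, which is inside the
	 * accepted range, so neither warning above fires.
	 */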
1507
1508	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1509	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1510
1511	sch_tree_unlock(sch);
1512
1513	qdisc_class_hash_grow(sch, &q->clhash);
1514
1515	*arg = (unsigned long)cl;
1516	return 0;
1517
1518failure:
1519	return err;
1520}
1521
1522static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1523{
1524	struct htb_sched *q = qdisc_priv(sch);
1525	struct htb_class *cl = (struct htb_class *)arg;
1526	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1527
1528	return fl;
1529}
1530
1531static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1532				     u32 classid)
1533{
1534	struct htb_class *cl = htb_find(classid, sch);
1535
1536	/* The check "if (cl && !cl->level) return 0;" used to live here to
1537	 * prevent attaching filters to leaves. But at least the tc_index
1538	 * filter uses bind just to look up the class, so we have to allow
1539	 * it.
1540	 * ----
1541	 * 19.6.2002: As Werner explained, this is OK - binding a filter is
1542	 * just another way to "lock" the class; unlike "get", this lock can
1543	 * be broken by the class during destroy, IIUC.
1544	 */
1545	if (cl)
1546		cl->filter_cnt++;
1547	return (unsigned long)cl;
1548}
1549
1550static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1551{
1552	struct htb_class *cl = (struct htb_class *)arg;
1553
1554	if (cl)
1555		cl->filter_cnt--;
1556}
1557
1558static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1559{
1560	struct htb_sched *q = qdisc_priv(sch);
1561	struct htb_class *cl;
1562	unsigned int i;
1563
1564	if (arg->stop)
1565		return;
1566
1567	for (i = 0; i < q->clhash.hashsize; i++) {
1568		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1569			if (arg->count < arg->skip) {
1570				arg->count++;
1571				continue;
1572			}
1573			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1574				arg->stop = 1;
1575				return;
1576			}
1577			arg->count++;
1578		}
1579	}
1580}
1581
1582static const struct Qdisc_class_ops htb_class_ops = {
1583	.graft		=	htb_graft,
1584	.leaf		=	htb_leaf,
1585	.qlen_notify	=	htb_qlen_notify,
1586	.get		=	htb_get,
1587	.put		=	htb_put,
1588	.change		=	htb_change_class,
1589	.delete		=	htb_delete,
1590	.walk		=	htb_walk,
1591	.tcf_chain	=	htb_find_tcf,
1592	.bind_tcf	=	htb_bind_filter,
1593	.unbind_tcf	=	htb_unbind_filter,
1594	.dump		=	htb_dump_class,
1595	.dump_stats	=	htb_dump_class_stats,
1596};
1597
1598static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1599	.cl_ops		=	&htb_class_ops,
1600	.id		=	"htb",
1601	.priv_size	=	sizeof(struct htb_sched),
1602	.enqueue	=	htb_enqueue,
1603	.dequeue	=	htb_dequeue,
1604	.peek		=	qdisc_peek_dequeued,
1605	.drop		=	htb_drop,
1606	.init		=	htb_init,
1607	.reset		=	htb_reset,
1608	.destroy	=	htb_destroy,
1609	.dump		=	htb_dump,
1610	.owner		=	THIS_MODULE,
1611};
1612
1613static int __init htb_module_init(void)
1614{
1615	return register_qdisc(&htb_qdisc_ops);
1616}
1617static void __exit htb_module_exit(void)
1618{
1619	unregister_qdisc(&htb_qdisc_ops);
1620}
1621
1622module_init(htb_module_init)
1623module_exit(htb_module_exit)
1624MODULE_LICENSE("GPL");