/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */
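
/*
 * For illustration (not part of the original source): a user-supplied
 * two-segment curve of m1 = 10 Mbit/s for d = 20 ms followed by
 * m2 = 1 Mbit/s ends up in this structure as
 *   sm1/ism1 : scaled slope and inverse slope of the 10 Mbit/s segment
 *   dx       : 20 ms converted to psched ticks
 *   dy       : bytes transferred during dx at m1, i.e. seg_x2y(dx, sm1)
 *   sm2/ism2 : scaled slope and inverse slope of the 1 Mbit/s tail
 * sc2isc() below performs exactly this conversion.
 */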

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

/*
 * the eligible tree holds backlogged classes, sorted by their eligible
 * times. there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * a vttree holds backlogged child classes, sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
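
/*
 * Worked example (added for illustration, using the 1 Mbps column of
 * the table above): 1 Mbps moves 128e-3 bytes per 1.024us tick, so
 *	sm  ~= 0.128  * 2^20 ~= 134218
 *	ism  = 7.8125 * 2^18  = 2048000
 * both integers carry the slope with more than 4 significant decimal
 * digits, which is the point of the scaling.
 */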
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value (the inverse of rtsc_x2y() below)
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
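	/*
	 * Derivation (added for clarity): expanding seg_x2y() gives
	 *	dx * sm1 >> SM_SHIFT == (dx * sm2 >> SM_SHIFT) + (y1 - y)
	 * hence
	 *	dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT
	 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 * which is what the next three statements compute.
	 */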
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
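
/*
 * For illustration (hypothetical device and handles; tc-hfsc(8) has the
 * authoritative syntax): these attributes carry the rt/ls/ul service
 * curves that tc configures, e.g.
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 10mbit d 20ms m2 1mbit ls m2 2mbit ul m2 5mbit
 *
 * where rt fills TCA_HFSC_RSC, ls fills TCA_HFSC_FSC and ul fills
 * TCA_HFSC_USC.
 */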

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      qdisc_root_sleeping_lock(sch),
					      tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	NLA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}



static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	bstats_update(&cl->bstats, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
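	/*
	 * Example (added for clarity): with class A (e = t0, d = t5) and
	 * class B (e = t1, d = t3), both eligible at cur_time t2, B is
	 * served first -- among eligible classes the deadline, not the
	 * eligible time, decides.
	 */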
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_unthrottled(sch);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
v3.15
   1/*
   2 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public License
   6 * as published by the Free Software Foundation; either version 2
   7 * of the License, or (at your option) any later version.
   8 *
   9 * 2003-10-17 - Ported from altq
  10 */
  11/*
  12 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
  13 *
  14 * Permission to use, copy, modify, and distribute this software and
  15 * its documentation is hereby granted (including for commercial or
  16 * for-profit use), provided that both the copyright notice and this
  17 * permission notice appear in all copies of the software, derivative
  18 * works, or modified versions, and any portions thereof.
  19 *
  20 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
  21 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
  22 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
  23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  25 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
  26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  28 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  32 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  33 * DAMAGE.
  34 *
  35 * Carnegie Mellon encourages (but does not require) users of this
  36 * software to return any improvements or extensions that they make,
  37 * and to grant Carnegie Mellon the rights to redistribute these
  38 * changes without encumbrance.
  39 */
  40/*
  41 * H-FSC is described in Proceedings of SIGCOMM'97,
  42 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
  43 * Real-Time and Priority Service"
  44 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
  45 *
  46 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
  47 * when a class has an upperlimit, the fit-time is computed from the
  48 * upperlimit service curve.  the link-sharing scheduler does not schedule
  49 * a class whose fit-time exceeds the current time.
  50 */
  51
  52#include <linux/kernel.h>
  53#include <linux/module.h>
  54#include <linux/types.h>
  55#include <linux/errno.h>
  56#include <linux/compiler.h>
  57#include <linux/spinlock.h>
  58#include <linux/skbuff.h>
  59#include <linux/string.h>
  60#include <linux/slab.h>
  61#include <linux/list.h>
  62#include <linux/rbtree.h>
  63#include <linux/init.h>
  64#include <linux/rtnetlink.h>
  65#include <linux/pkt_sched.h>
  66#include <net/netlink.h>
  67#include <net/pkt_sched.h>
  68#include <net/pkt_cls.h>
  69#include <asm/div64.h>
  70
  71/*
  72 * kernel internal service curve representation:
  73 *   coordinates are given by 64 bit unsigned integers.
  74 *   x-axis: unit is clock count.
  75 *   y-axis: unit is byte.
  76 *
  77 *   The service curve parameters are converted to the internal
  78 *   representation. The slope values are scaled to avoid overflow.
  79 *   the inverse slope values as well as the y-projection of the 1st
  80 *   segment are kept in order to avoid 64-bit divide operations
  81 *   that are expensive on 32-bit architectures.
  82 */
  83
  84struct internal_sc {
  85	u64	sm1;	/* scaled slope of the 1st segment */
  86	u64	ism1;	/* scaled inverse-slope of the 1st segment */
  87	u64	dx;	/* the x-projection of the 1st segment */
  88	u64	dy;	/* the y-projection of the 1st segment */
  89	u64	sm2;	/* scaled slope of the 2nd segment */
  90	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
  91};
  92
  93/* runtime service curve */
  94struct runtime_sc {
  95	u64	x;	/* current starting position on x-axis */
  96	u64	y;	/* current starting position on y-axis */
  97	u64	sm1;	/* scaled slope of the 1st segment */
  98	u64	ism1;	/* scaled inverse-slope of the 1st segment */
  99	u64	dx;	/* the x-projection of the 1st segment */
 100	u64	dy;	/* the y-projection of the 1st segment */
 101	u64	sm2;	/* scaled slope of the 2nd segment */
 102	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
 103};
 104
 105enum hfsc_class_flags {
 106	HFSC_RSC = 0x1,
 107	HFSC_FSC = 0x2,
 108	HFSC_USC = 0x4
 109};
 110
 111struct hfsc_class {
 112	struct Qdisc_class_common cl_common;
 113	unsigned int	refcnt;		/* usage count */
 114
 115	struct gnet_stats_basic_packed bstats;
 116	struct gnet_stats_queue qstats;
 117	struct gnet_stats_rate_est64 rate_est;
 118	unsigned int	level;		/* class level in hierarchy */
 119	struct tcf_proto *filter_list;	/* filter list */
 120	unsigned int	filter_cnt;	/* filter count */
 121
 122	struct hfsc_sched *sched;	/* scheduler data */
 123	struct hfsc_class *cl_parent;	/* parent class */
 124	struct list_head siblings;	/* sibling classes */
 125	struct list_head children;	/* child classes */
 126	struct Qdisc	*qdisc;		/* leaf qdisc */
 127
 128	struct rb_node el_node;		/* qdisc's eligible tree member */
 129	struct rb_root vt_tree;		/* active children sorted by cl_vt */
 130	struct rb_node vt_node;		/* parent's vt_tree member */
 131	struct rb_root cf_tree;		/* active children sorted by cl_f */
 132	struct rb_node cf_node;		/* parent's cf_heap member */
 133	struct list_head dlist;		/* drop list member */
 134
 135	u64	cl_total;		/* total work in bytes */
 136	u64	cl_cumul;		/* cumulative work in bytes done by
 137					   real-time criteria */
 138
 139	u64	cl_d;			/* deadline*/
 140	u64	cl_e;			/* eligible time */
 141	u64	cl_vt;			/* virtual time */
 142	u64	cl_f;			/* time when this class will fit for
 143					   link-sharing, max(myf, cfmin) */
 144	u64	cl_myf;			/* my fit-time (calculated from this
 145					   class's own upperlimit curve) */
 146	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
 147					   history dependence) */
 148	u64	cl_cfmin;		/* earliest children's fit-time (used
 149					   with cl_myf to obtain cl_f) */
 150	u64	cl_cvtmin;		/* minimal virtual time among the
 151					   children fit for link-sharing
 152					   (monotonic within a period) */
 153	u64	cl_vtadj;		/* intra-period cumulative vt
 154					   adjustment */
 155	u64	cl_vtoff;		/* inter-period cumulative vt offset */
 156	u64	cl_cvtmax;		/* max child's vt in the last period */
 157	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
 158	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
 159					   time */
 160
 161	struct internal_sc cl_rsc;	/* internal real-time service curve */
 162	struct internal_sc cl_fsc;	/* internal fair service curve */
 163	struct internal_sc cl_usc;	/* internal upperlimit service curve */
 164	struct runtime_sc cl_deadline;	/* deadline curve */
 165	struct runtime_sc cl_eligible;	/* eligible curve */
 166	struct runtime_sc cl_virtual;	/* virtual curve */
 167	struct runtime_sc cl_ulimit;	/* upperlimit curve */
 168
 169	unsigned long	cl_flags;	/* which curves are valid */
 170	unsigned long	cl_vtperiod;	/* vt period sequence number */
 171	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
 172	unsigned long	cl_nactive;	/* number of active children */
 173};
 174
 175struct hfsc_sched {
 176	u16	defcls;				/* default class id */
 177	struct hfsc_class root;			/* root class */
 178	struct Qdisc_class_hash clhash;		/* class hash */
 179	struct rb_root eligible;		/* eligible tree */
 180	struct list_head droplist;		/* active leaf class list (for
 181						   dropping) */
 182	struct qdisc_watchdog watchdog;		/* watchdog timer */
 183};
 184
 185#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
 186
 187
 188/*
 189 * eligible tree holds backlogged classes being sorted by their eligible times.
 190 * there is one eligible tree per hfsc instance.
 191 */
 192
 193static void
 194eltree_insert(struct hfsc_class *cl)
 195{
 196	struct rb_node **p = &cl->sched->eligible.rb_node;
 197	struct rb_node *parent = NULL;
 198	struct hfsc_class *cl1;
 199
 200	while (*p != NULL) {
 201		parent = *p;
 202		cl1 = rb_entry(parent, struct hfsc_class, el_node);
 203		if (cl->cl_e >= cl1->cl_e)
 204			p = &parent->rb_right;
 205		else
 206			p = &parent->rb_left;
 207	}
 208	rb_link_node(&cl->el_node, parent, p);
 209	rb_insert_color(&cl->el_node, &cl->sched->eligible);
 210}
 211
 212static inline void
 213eltree_remove(struct hfsc_class *cl)
 214{
 215	rb_erase(&cl->el_node, &cl->sched->eligible);
 216}
 217
 218static inline void
 219eltree_update(struct hfsc_class *cl)
 220{
 221	eltree_remove(cl);
 222	eltree_insert(cl);
 223}
 224
 225/* find the class with the minimum deadline among the eligible classes */
 226static inline struct hfsc_class *
 227eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
 228{
 229	struct hfsc_class *p, *cl = NULL;
 230	struct rb_node *n;
 231
 232	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
 233		p = rb_entry(n, struct hfsc_class, el_node);
 234		if (p->cl_e > cur_time)
 235			break;
 236		if (cl == NULL || p->cl_d < cl->cl_d)
 237			cl = p;
 238	}
 239	return cl;
 240}
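    /*
     * Note: since the tree is ordered by eligible time, the scan may
     * stop at the first class with cl_e > cur_time; the minimum
     * deadline is then tracked with a linear pass over that eligible
     * prefix.
     */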
 241
 242/* find the class with minimum eligible time among the eligible classes */
 243static inline struct hfsc_class *
 244eltree_get_minel(struct hfsc_sched *q)
 245{
 246	struct rb_node *n;
 247
 248	n = rb_first(&q->eligible);
 249	if (n == NULL)
 250		return NULL;
 251	return rb_entry(n, struct hfsc_class, el_node);
 252}
 253
 254/*
 255 * vttree holds backlogged child classes sorted by their virtual time.
 256 * each intermediate class has one vttree.
 257 */
 258static void
 259vttree_insert(struct hfsc_class *cl)
 260{
 261	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
 262	struct rb_node *parent = NULL;
 263	struct hfsc_class *cl1;
 264
 265	while (*p != NULL) {
 266		parent = *p;
 267		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
 268		if (cl->cl_vt >= cl1->cl_vt)
 269			p = &parent->rb_right;
 270		else
 271			p = &parent->rb_left;
 272	}
 273	rb_link_node(&cl->vt_node, parent, p);
 274	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
 275}
 276
 277static inline void
 278vttree_remove(struct hfsc_class *cl)
 279{
 280	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
 281}
 282
 283static inline void
 284vttree_update(struct hfsc_class *cl)
 285{
 286	vttree_remove(cl);
 287	vttree_insert(cl);
 288}
 289
 290static inline struct hfsc_class *
 291vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
 292{
 293	struct hfsc_class *p;
 294	struct rb_node *n;
 295
 296	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
 297		p = rb_entry(n, struct hfsc_class, vt_node);
 298		if (p->cl_f <= cur_time)
 299			return p;
 300	}
 301	return NULL;
 302}
 303
 304/*
 305 * get the leaf class with the minimum vt in the hierarchy
 306 */
 307static struct hfsc_class *
 308vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
 309{
 310	/* if the root class's cfmin is bigger than cur_time, nothing to do */
 311	if (cl->cl_cfmin > cur_time)
 312		return NULL;
 313
 314	while (cl->level > 0) {
 315		cl = vttree_firstfit(cl, cur_time);
 316		if (cl == NULL)
 317			return NULL;
 318		/*
 319		 * update parent's cl_cvtmin.
 320		 */
 321		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
 322			cl->cl_parent->cl_cvtmin = cl->cl_vt;
 323	}
 324	return cl;
 325}
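    /*
     * Note: each level of the descent above takes the first child (in
     * vt order) whose fit-time has arrived, so the result is the
     * minimum-vt leaf among the classes currently fit for
     * link-sharing, not the global minimum-vt leaf.
     */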
 326
 327static void
 328cftree_insert(struct hfsc_class *cl)
 329{
 330	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
 331	struct rb_node *parent = NULL;
 332	struct hfsc_class *cl1;
 333
 334	while (*p != NULL) {
 335		parent = *p;
 336		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
 337		if (cl->cl_f >= cl1->cl_f)
 338			p = &parent->rb_right;
 339		else
 340			p = &parent->rb_left;
 341	}
 342	rb_link_node(&cl->cf_node, parent, p);
 343	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
 344}
 345
 346static inline void
 347cftree_remove(struct hfsc_class *cl)
 348{
 349	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
 350}
 351
 352static inline void
 353cftree_update(struct hfsc_class *cl)
 354{
 355	cftree_remove(cl);
 356	cftree_insert(cl);
 357}
 358
 359/*
 360 * service curve support functions
 361 *
 362 *  external service curve parameters
 363 *	m: bps
 364 *	d: us
 365 *  internal service curve parameters
 366 *	sm: (bytes/psched_us) << SM_SHIFT
 367 *	ism: (psched_us/byte) << ISM_SHIFT
 368 *	dx: psched_us
 369 *
 370 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 371 *
 372 * sm and ism are scaled in order to keep effective digits.
 373 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 374 * digits in decimal using the following table.
 375 *
 376 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 377 *  ------------+-------------------------------------------------------
 378 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 379 *
 380 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 381 *
 382 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 383 */
 384#define	SM_SHIFT	(30 - PSCHED_SHIFT)
 385#define	ISM_SHIFT	(8 + PSCHED_SHIFT)
 386
 387#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
 388#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
 389
 390static inline u64
 391seg_x2y(u64 x, u64 sm)
 392{
 393	u64 y;
 394
 395	/*
 396	 * compute
 397	 *	y = x * sm >> SM_SHIFT
 398	 * but divide it for the upper and lower bits to avoid overflow
 399	 */
 400	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
 401	return y;
 402}
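    /*
     * Splitting x into high and low parts preserves the exact floor
     * result: with xh = x >> SM_SHIFT and xl = x & SM_MASK,
     *
     *	(x * sm) >> SM_SHIFT = ((xh << SM_SHIFT) + xl) * sm >> SM_SHIFT
     *	                     = xh * sm + ((xl * sm) >> SM_SHIFT)
     *
     * while keeping each intermediate product within 64 bits for any
     * realistic x and sm.  seg_y2x() below uses the same trick with
     * ISM_SHIFT/ISM_MASK.
     */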
 403
 404static inline u64
 405seg_y2x(u64 y, u64 ism)
 406{
 407	u64 x;
 408
 409	if (y == 0)
 410		x = 0;
 411	else if (ism == HT_INFINITY)
 412		x = HT_INFINITY;
 413	else {
 414		x = (y >> ISM_SHIFT) * ism
 415		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
 416	}
 417	return x;
 418}
 419
 420/* Convert m (bps) into sm (bytes/psched us) */
 421static u64
 422m2sm(u32 m)
 423{
 424	u64 sm;
 425
 426	sm = ((u64)m << SM_SHIFT);
 427	sm += PSCHED_TICKS_PER_SEC - 1;
 428	do_div(sm, PSCHED_TICKS_PER_SEC);
 429	return sm;
 430}
 431
 432/* convert m (bps) into ism (psched us/byte) */
 433static u64
 434m2ism(u32 m)
 435{
 436	u64 ism;
 437
 438	if (m == 0)
 439		ism = HT_INFINITY;
 440	else {
 441		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
 442		ism += m - 1;
 443		do_div(ism, m);
 444	}
 445	return ism;
 446}
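    /*
     * A worked example of m2sm()/m2ism() (illustrative arithmetic,
     * assuming PSCHED_SHIFT 10, i.e. 1 tick = 1.024us and
     * PSCHED_TICKS_PER_SEC ~= 976562):
     *
     *	m   = 10 Mbit/s = 1250000 bytes/s
     *	sm  ~= (1250000 << 20) / 976562 ~= 1342178,
     *	       i.e. 1.28 bytes/tick in fixed point, matching the
     *	       "10Mbps -> 1280e-3" column of the table above
     *	ism ~= (976562 << 18) / 1250000 ~= 204800,
     *	       i.e. 0.78125 ticks/byte in fixed point
     */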
 447
 448/* convert d (us) into dx (psched us) */
 449static u64
 450d2dx(u32 d)
 451{
 452	u64 dx;
 453
 454	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
 455	dx += USEC_PER_SEC - 1;
 456	do_div(dx, USEC_PER_SEC);
 457	return dx;
 458}
 459
 460/* convert sm (bytes/psched us) into m (bps) */
 461static u32
 462sm2m(u64 sm)
 463{
 464	u64 m;
 465
 466	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
 467	return (u32)m;
 468}
 469
 470/* convert dx (psched us) into d (us) */
 471static u32
 472dx2d(u64 dx)
 473{
 474	u64 d;
 475
 476	d = dx * USEC_PER_SEC;
 477	do_div(d, PSCHED_TICKS_PER_SEC);
 478	return (u32)d;
 479}
 480
 481static void
 482sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
 483{
 484	isc->sm1  = m2sm(sc->m1);
 485	isc->ism1 = m2ism(sc->m1);
 486	isc->dx   = d2dx(sc->d);
 487	isc->dy   = seg_x2y(isc->dx, isc->sm1);
 488	isc->sm2  = m2sm(sc->m2);
 489	isc->ism2 = m2ism(sc->m2);
 490}
 491
 492/*
 493 * initialize the runtime service curve with the given internal
 494 * service curve starting at (x, y).
 495 */
 496static void
 497rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
 498{
 499	rtsc->x	   = x;
 500	rtsc->y    = y;
 501	rtsc->sm1  = isc->sm1;
 502	rtsc->ism1 = isc->ism1;
 503	rtsc->dx   = isc->dx;
 504	rtsc->dy   = isc->dy;
 505	rtsc->sm2  = isc->sm2;
 506	rtsc->ism2 = isc->ism2;
 507}
 508
 509/*
 510 * calculate the x-coordinate on the runtime service curve for the
 511 * given y value, i.e. invert the curve
 512 */
 513static u64
 514rtsc_y2x(struct runtime_sc *rtsc, u64 y)
 515{
 516	u64 x;
 517
 518	if (y < rtsc->y)
 519		x = rtsc->x;
 520	else if (y <= rtsc->y + rtsc->dy) {
 521		/* x belongs to the 1st segment */
 522		if (rtsc->dy == 0)
 523			x = rtsc->x + rtsc->dx;
 524		else
 525			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
 526	} else {
 527		/* x belongs to the 2nd segment */
 528		x = rtsc->x + rtsc->dx
 529		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
 530	}
 531	return x;
 532}
 533
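    /*
     * calculate the y-coordinate of the runtime service curve at the
     * given x value
     */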
 534static u64
 535rtsc_x2y(struct runtime_sc *rtsc, u64 x)
 536{
 537	u64 y;
 538
 539	if (x <= rtsc->x)
 540		y = rtsc->y;
 541	else if (x <= rtsc->x + rtsc->dx)
 542		/* y belongs to the 1st segment */
 543		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
 544	else
 545		/* y belongs to the 2nd segment */
 546		y = rtsc->y + rtsc->dy
 547		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
 548	return y;
 549}
 550
 551/*
 552 * update the runtime service curve by taking the minimum of the current
 553 * runtime service curve and the service curve starting at (x, y).
 554 */
 555static void
 556rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
 557{
 558	u64 y1, y2, dx, dy;
 559	u32 dsm;
 560
 561	if (isc->sm1 <= isc->sm2) {
 562		/* service curve is convex */
 563		y1 = rtsc_x2y(rtsc, x);
 564		if (y1 < y)
 565			/* the current rtsc is smaller */
 566			return;
 567		rtsc->x = x;
 568		rtsc->y = y;
 569		return;
 570	}
 571
 572	/*
 573	 * service curve is concave
 574	 * compute the two y values of the current rtsc
 575	 *	y1: at x
 576	 *	y2: at (x + dx)
 577	 */
 578	y1 = rtsc_x2y(rtsc, x);
 579	if (y1 <= y) {
 580		/* rtsc is below isc, no change to rtsc */
 581		return;
 582	}
 583
 584	y2 = rtsc_x2y(rtsc, x + isc->dx);
 585	if (y2 >= y + isc->dy) {
 586		/* rtsc is above isc, replace rtsc by isc */
 587		rtsc->x = x;
 588		rtsc->y = y;
 589		rtsc->dx = isc->dx;
 590		rtsc->dy = isc->dy;
 591		return;
 592	}
 593
 594	/*
 595	 * the two curves intersect
 596	 * compute the offsets (dx, dy) using the reverse
 597	 * function of seg_x2y()
 598	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 599	 */
 600	dx = (y1 - y) << SM_SHIFT;
 601	dsm = isc->sm1 - isc->sm2;
 602	do_div(dx, dsm);
 603	/*
 604	 * check if (x, y1) belongs to the 1st segment of rtsc.
 605	 * if so, add the offset.
 606	 */
 607	if (rtsc->x + rtsc->dx > x)
 608		dx += rtsc->x + rtsc->dx - x;
 609	dy = seg_x2y(dx, isc->sm1);
 610
 611	rtsc->x = x;
 612	rtsc->y = y;
 613	rtsc->dx = dx;
 614	rtsc->dy = dy;
 615}
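    /*
     * Derivation of the intersection offset computed above, restating
     * the seg_x2y() identity from the comment as explicit algebra: dx
     * is chosen so that the first segment of isc, starting at (x, y),
     * meets the current rtsc continued from y1 with slope sm2:
     *
     *	y + (dx * sm1 >> SM_SHIFT) = y1 + (dx * sm2 >> SM_SHIFT)
     *	dx * (sm1 - sm2) = (y1 - y) << SM_SHIFT
     *	dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
     *
     * which is exactly the do_div() above with dsm = sm1 - sm2.
     */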
 616
 617static void
 618init_ed(struct hfsc_class *cl, unsigned int next_len)
 619{
 620	u64 cur_time = psched_get_time();
 621
 622	/* update the deadline curve */
 623	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
 624
 625	/*
 626	 * update the eligible curve.
 627	 * for concave, it is equal to the deadline curve.
 628	 * for convex, it is a linear curve with slope m2.
 629	 */
 630	cl->cl_eligible = cl->cl_deadline;
 631	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
 632		cl->cl_eligible.dx = 0;
 633		cl->cl_eligible.dy = 0;
 634	}
 635
 636	/* compute e and d */
 637	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
 638	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
 639
 640	eltree_insert(cl);
 641}
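    /*
     * Note: sm1 <= sm2 is the convex case; zeroing dx/dy collapses
     * the copied two-segment deadline curve into the single m2-slope
     * segment starting at (cur_time, cl_cumul), i.e. the linear
     * eligible curve described in the comment above.
     */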
 642
 643static void
 644update_ed(struct hfsc_class *cl, unsigned int next_len)
 645{
 646	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
 647	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
 648
 649	eltree_update(cl);
 650}
 651
 652static inline void
 653update_d(struct hfsc_class *cl, unsigned int next_len)
 654{
 655	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
 656}
 657
 658static inline void
 659update_cfmin(struct hfsc_class *cl)
 660{
 661	struct rb_node *n = rb_first(&cl->cf_tree);
 662	struct hfsc_class *p;
 663
 664	if (n == NULL) {
 665		cl->cl_cfmin = 0;
 666		return;
 667	}
 668	p = rb_entry(n, struct hfsc_class, cf_node);
 669	cl->cl_cfmin = p->cl_f;
 670}
 671
 672static void
 673init_vf(struct hfsc_class *cl, unsigned int len)
 674{
 675	struct hfsc_class *max_cl;
 676	struct rb_node *n;
 677	u64 vt, f, cur_time;
 678	int go_active;
 679
 680	cur_time = 0;
 681	go_active = 1;
 682	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
 683		if (go_active && cl->cl_nactive++ == 0)
 684			go_active = 1;
 685		else
 686			go_active = 0;
 687
 688		if (go_active) {
 689			n = rb_last(&cl->cl_parent->vt_tree);
 690			if (n != NULL) {
 691				max_cl = rb_entry(n, struct hfsc_class, vt_node);
 692				/*
 693				 * set vt to the average of the min and max
 694				 * classes.  if the parent's period didn't
 695				 * change, don't decrease vt of the class.
 696				 */
 697				vt = max_cl->cl_vt;
 698				if (cl->cl_parent->cl_cvtmin != 0)
 699					vt = (cl->cl_parent->cl_cvtmin + vt)/2;
 700
 701				if (cl->cl_parent->cl_vtperiod !=
 702				    cl->cl_parentperiod || vt > cl->cl_vt)
 703					cl->cl_vt = vt;
 704			} else {
 705				/*
 706				 * first child for a new parent backlog period.
 707				 * add parent's cvtmax to cvtoff to make a new
 708				 * vt (vtoff + vt) larger than the vt in the
 709				 * last period for all children.
 710				 */
 711				vt = cl->cl_parent->cl_cvtmax;
 712				cl->cl_parent->cl_cvtoff += vt;
 713				cl->cl_parent->cl_cvtmax = 0;
 714				cl->cl_parent->cl_cvtmin = 0;
 715				cl->cl_vt = 0;
 716			}
 717
 718			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
 719							cl->cl_pcvtoff;
 720
 721			/* update the virtual curve */
 722			vt = cl->cl_vt + cl->cl_vtoff;
 723			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
 724						      cl->cl_total);
 725			if (cl->cl_virtual.x == vt) {
 726				cl->cl_virtual.x -= cl->cl_vtoff;
 727				cl->cl_vtoff = 0;
 728			}
 729			cl->cl_vtadj = 0;
 730
 731			cl->cl_vtperiod++;  /* increment vt period */
 732			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
 733			if (cl->cl_parent->cl_nactive == 0)
 734				cl->cl_parentperiod++;
 735			cl->cl_f = 0;
 736
 737			vttree_insert(cl);
 738			cftree_insert(cl);
 739
 740			if (cl->cl_flags & HFSC_USC) {
 741				/* class has upper limit curve */
 742				if (cur_time == 0)
 743					cur_time = psched_get_time();
 744
 745				/* update the ulimit curve */
 746				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
 747					 cl->cl_total);
 748				/* compute myf */
 749				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
 750						      cl->cl_total);
 751				cl->cl_myfadj = 0;
 752			}
 753		}
 754
 755		f = max(cl->cl_myf, cl->cl_cfmin);
 756		if (f != cl->cl_f) {
 757			cl->cl_f = f;
 758			cftree_update(cl);
 759		}
 760		update_cfmin(cl->cl_parent);
 761	}
 762}
 763
 764static void
 765update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
 766{
 767	u64 f; /* , myf_bound, delta -- used by the #if 0 block below */
 768	int go_passive = 0;
 769
 770	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
 771		go_passive = 1;
 772
 773	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
 774		cl->cl_total += len;
 775
 776		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
 777			continue;
 778
 779		if (go_passive && --cl->cl_nactive == 0)
 780			go_passive = 1;
 781		else
 782			go_passive = 0;
 783
 784		if (go_passive) {
 785			/* no more active child, going passive */
 786
 787			/* update cvtmax of the parent class */
 788			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
 789				cl->cl_parent->cl_cvtmax = cl->cl_vt;
 790
 791			/* remove this class from the vt tree */
 792			vttree_remove(cl);
 793
 794			cftree_remove(cl);
 795			update_cfmin(cl->cl_parent);
 796
 797			continue;
 798		}
 799
 800		/*
 801		 * update vt and f
 802		 */
 803		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
 804			    - cl->cl_vtoff + cl->cl_vtadj;
 805
 806		/*
 807		 * if vt of the class is smaller than cvtmin,
 808		 * the class was skipped in the past due to non-fit.
 809		 * if so, we need to adjust vtadj.
 810		 */
 811		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
 812			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
 813			cl->cl_vt = cl->cl_parent->cl_cvtmin;
 814		}
 815
 816		/* update the vt tree */
 817		vttree_update(cl);
 818
 819		if (cl->cl_flags & HFSC_USC) {
 820			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
 821							      cl->cl_total);
 822#if 0
 823			/*
 824			 * This code causes classes to stay way under their
 825			 * limit when multiple classes are used at gigabit
 826			 * speed. needs investigation. -kaber
 827			 */
 828			/*
 829			 * if myf lags behind by more than one clock tick
 830			 * from the current time, adjust myfadj to prevent
 831			 * a rate-limited class from going greedy.
 832			 * in a steady state under rate-limiting, myf
 833			 * fluctuates within one clock tick.
 834			 */
 835			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
 836			if (cl->cl_myf < myf_bound) {
 837				delta = cur_time - cl->cl_myf;
 838				cl->cl_myfadj += delta;
 839				cl->cl_myf += delta;
 840			}
 841#endif
 842		}
 843
 844		f = max(cl->cl_myf, cl->cl_cfmin);
 845		if (f != cl->cl_f) {
 846			cl->cl_f = f;
 847			cftree_update(cl);
 848			update_cfmin(cl->cl_parent);
 849		}
 850	}
 851}
 852
 853static void
 854set_active(struct hfsc_class *cl, unsigned int len)
 855{
 856	if (cl->cl_flags & HFSC_RSC)
 857		init_ed(cl, len);
 858	if (cl->cl_flags & HFSC_FSC)
 859		init_vf(cl, len);
 860
 861	list_add_tail(&cl->dlist, &cl->sched->droplist);
 862}
 863
 864static void
 865set_passive(struct hfsc_class *cl)
 866{
 867	if (cl->cl_flags & HFSC_RSC)
 868		eltree_remove(cl);
 869
 870	list_del(&cl->dlist);
 871
 872	/*
 873	 * vttree removal is now handled in update_vf(), so update_vf(cl, 0, 0)
 874	 * must be called explicitly to remove a class from the vttree.
 875	 */
 876}
 877
 878static unsigned int
 879qdisc_peek_len(struct Qdisc *sch)
 880{
 881	struct sk_buff *skb;
 882	unsigned int len;
 883
 884	skb = sch->ops->peek(sch);
 885	if (skb == NULL) {
 886		qdisc_warn_nonwc("qdisc_peek_len", sch);
 887		return 0;
 888	}
 889	len = qdisc_pkt_len(skb);
 890
 891	return len;
 892}
 893
 894static void
 895hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
 896{
 897	unsigned int len = cl->qdisc->q.qlen;
 898
 899	qdisc_reset(cl->qdisc);
 900	qdisc_tree_decrease_qlen(cl->qdisc, len);
 901}
 902
 903static void
 904hfsc_adjust_levels(struct hfsc_class *cl)
 905{
 906	struct hfsc_class *p;
 907	unsigned int level;
 908
 909	do {
 910		level = 0;
 911		list_for_each_entry(p, &cl->children, siblings) {
 912			if (p->level >= level)
 913				level = p->level + 1;
 914		}
 915		cl->level = level;
 916	} while ((cl = cl->cl_parent) != NULL);
 917}
 918
 919static inline struct hfsc_class *
 920hfsc_find_class(u32 classid, struct Qdisc *sch)
 921{
 922	struct hfsc_sched *q = qdisc_priv(sch);
 923	struct Qdisc_class_common *clc;
 924
 925	clc = qdisc_class_find(&q->clhash, classid);
 926	if (clc == NULL)
 927		return NULL;
 928	return container_of(clc, struct hfsc_class, cl_common);
 929}
 930
 931static void
 932hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
 933		u64 cur_time)
 934{
 935	sc2isc(rsc, &cl->cl_rsc);
 936	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
 937	cl->cl_eligible = cl->cl_deadline;
 938	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
 939		cl->cl_eligible.dx = 0;
 940		cl->cl_eligible.dy = 0;
 941	}
 942	cl->cl_flags |= HFSC_RSC;
 943}
 944
 945static void
 946hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
 947{
 948	sc2isc(fsc, &cl->cl_fsc);
 949	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
 950	cl->cl_flags |= HFSC_FSC;
 951}
 952
 953static void
 954hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
 955		u64 cur_time)
 956{
 957	sc2isc(usc, &cl->cl_usc);
 958	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
 959	cl->cl_flags |= HFSC_USC;
 960}
 961
 962static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
 963	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
 964	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
 965	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
 966};
 967
 968static int
 969hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 970		  struct nlattr **tca, unsigned long *arg)
 971{
 972	struct hfsc_sched *q = qdisc_priv(sch);
 973	struct hfsc_class *cl = (struct hfsc_class *)*arg;
 974	struct hfsc_class *parent = NULL;
 975	struct nlattr *opt = tca[TCA_OPTIONS];
 976	struct nlattr *tb[TCA_HFSC_MAX + 1];
 977	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
 978	u64 cur_time;
 979	int err;
 980
 981	if (opt == NULL)
 982		return -EINVAL;
 983
 984	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
 985	if (err < 0)
 986		return err;
 987
 988	if (tb[TCA_HFSC_RSC]) {
 989		rsc = nla_data(tb[TCA_HFSC_RSC]);
 990		if (rsc->m1 == 0 && rsc->m2 == 0)
 991			rsc = NULL;
 992	}
 993
 994	if (tb[TCA_HFSC_FSC]) {
 995		fsc = nla_data(tb[TCA_HFSC_FSC]);
 996		if (fsc->m1 == 0 && fsc->m2 == 0)
 997			fsc = NULL;
 998	}
 999
1000	if (tb[TCA_HFSC_USC]) {
1001		usc = nla_data(tb[TCA_HFSC_USC]);
1002		if (usc->m1 == 0 && usc->m2 == 0)
1003			usc = NULL;
1004	}
1005
1006	if (cl != NULL) {
1007		if (parentid) {
1008			if (cl->cl_parent &&
1009			    cl->cl_parent->cl_common.classid != parentid)
1010				return -EINVAL;
1011			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
1012				return -EINVAL;
1013		}
1014		cur_time = psched_get_time();
1015
1016		if (tca[TCA_RATE]) {
1017			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
1018					      qdisc_root_sleeping_lock(sch),
1019					      tca[TCA_RATE]);
1020			if (err)
1021				return err;
1022		}
1023
1024		sch_tree_lock(sch);
1025		if (rsc != NULL)
1026			hfsc_change_rsc(cl, rsc, cur_time);
1027		if (fsc != NULL)
1028			hfsc_change_fsc(cl, fsc);
1029		if (usc != NULL)
1030			hfsc_change_usc(cl, usc, cur_time);
1031
1032		if (cl->qdisc->q.qlen != 0) {
1033			if (cl->cl_flags & HFSC_RSC)
1034				update_ed(cl, qdisc_peek_len(cl->qdisc));
1035			if (cl->cl_flags & HFSC_FSC)
1036				update_vf(cl, 0, cur_time);
1037		}
1038		sch_tree_unlock(sch);
1039
1040		return 0;
1041	}
1042
1043	if (parentid == TC_H_ROOT)
1044		return -EEXIST;
1045
1046	parent = &q->root;
1047	if (parentid) {
1048		parent = hfsc_find_class(parentid, sch);
1049		if (parent == NULL)
1050			return -ENOENT;
1051	}
1052
1053	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
1054		return -EINVAL;
1055	if (hfsc_find_class(classid, sch))
1056		return -EEXIST;
1057
1058	if (rsc == NULL && fsc == NULL)
1059		return -EINVAL;
1060
1061	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1062	if (cl == NULL)
1063		return -ENOBUFS;
1064
1065	if (tca[TCA_RATE]) {
1066		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
1067					qdisc_root_sleeping_lock(sch),
1068					tca[TCA_RATE]);
1069		if (err) {
1070			kfree(cl);
1071			return err;
1072		}
1073	}
1074
1075	if (rsc != NULL)
1076		hfsc_change_rsc(cl, rsc, 0);
1077	if (fsc != NULL)
1078		hfsc_change_fsc(cl, fsc);
1079	if (usc != NULL)
1080		hfsc_change_usc(cl, usc, 0);
1081
1082	cl->cl_common.classid = classid;
1083	cl->refcnt    = 1;
1084	cl->sched     = q;
1085	cl->cl_parent = parent;
1086	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
1087				      &pfifo_qdisc_ops, classid);
1088	if (cl->qdisc == NULL)
1089		cl->qdisc = &noop_qdisc;
1090	INIT_LIST_HEAD(&cl->children);
1091	cl->vt_tree = RB_ROOT;
1092	cl->cf_tree = RB_ROOT;
1093
1094	sch_tree_lock(sch);
1095	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
1096	list_add_tail(&cl->siblings, &parent->children);
1097	if (parent->level == 0)
1098		hfsc_purge_queue(sch, parent);
1099	hfsc_adjust_levels(parent);
1100	cl->cl_pcvtoff = parent->cl_cvtoff;
1101	sch_tree_unlock(sch);
1102
1103	qdisc_class_hash_grow(sch, &q->clhash);
1104
1105	*arg = (unsigned long)cl;
1106	return 0;
1107}
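    /*
     * A hypothetical tc(8) sketch of how the attributes parsed above
     * are typically supplied from userspace (device name and rates
     * are made-up examples):
     *
     *	# attach hfsc as root qdisc; unclassified traffic -> 1:10
     *	tc qdisc add dev eth0 root handle 1: hfsc default 10
     *	# rt/ls/ul correspond to TCA_HFSC_RSC/FSC/USC respectively
     *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
     *		rt m1 0 d 0 m2 1mbit ls m2 2mbit ul m2 10mbit
     */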
1108
1109static void
1110hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
1111{
1112	struct hfsc_sched *q = qdisc_priv(sch);
1113
1114	tcf_destroy_chain(&cl->filter_list);
1115	qdisc_destroy(cl->qdisc);
1116	gen_kill_estimator(&cl->bstats, &cl->rate_est);
1117	if (cl != &q->root)
1118		kfree(cl);
1119}
1120
1121static int
1122hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
1123{
1124	struct hfsc_sched *q = qdisc_priv(sch);
1125	struct hfsc_class *cl = (struct hfsc_class *)arg;
1126
1127	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
1128		return -EBUSY;
1129
1130	sch_tree_lock(sch);
1131
1132	list_del(&cl->siblings);
1133	hfsc_adjust_levels(cl->cl_parent);
1134
1135	hfsc_purge_queue(sch, cl);
1136	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
1137
1138	BUG_ON(--cl->refcnt == 0);
1139	/*
1140	 * This shouldn't happen: we "hold" one cops->get() when called
1141	 * from tc_ctl_tclass; the destroy method is done from cops->put().
1142	 */
1143
1144	sch_tree_unlock(sch);
1145	return 0;
1146}
1147
1148static struct hfsc_class *
1149hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
1150{
1151	struct hfsc_sched *q = qdisc_priv(sch);
1152	struct hfsc_class *head, *cl;
1153	struct tcf_result res;
1154	struct tcf_proto *tcf;
1155	int result;
1156
1157	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
1158	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
1159		if (cl->level == 0)
1160			return cl;
1161
1162	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1163	head = &q->root;
1164	tcf = q->root.filter_list;
1165	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
1166#ifdef CONFIG_NET_CLS_ACT
1167		switch (result) {
1168		case TC_ACT_QUEUED:
1169		case TC_ACT_STOLEN:
1170			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
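    			/* fall through */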
1171		case TC_ACT_SHOT:
1172			return NULL;
1173		}
1174#endif
1175		cl = (struct hfsc_class *)res.class;
1176		if (!cl) {
1177			cl = hfsc_find_class(res.classid, sch);
1178			if (!cl)
1179				break; /* filter selected invalid classid */
1180			if (cl->level >= head->level)
1181				break; /* filter may only point downwards */
1182		}
1183
1184		if (cl->level == 0)
1185			return cl; /* hit leaf class */
1186
1187		/* apply inner filter chain */
1188		tcf = cl->filter_list;
1189		head = cl;
1190	}
1191
1192	/* classification failed, try default class */
1193	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
1194	if (cl == NULL || cl->level > 0)
1195		return NULL;
1196
1197	return cl;
1198}
1199
1200static int
1201hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1202		 struct Qdisc **old)
1203{
1204	struct hfsc_class *cl = (struct hfsc_class *)arg;
1205
1206	if (cl->level > 0)
1207		return -EINVAL;
1208	if (new == NULL) {
1209		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1210					cl->cl_common.classid);
1211		if (new == NULL)
1212			new = &noop_qdisc;
1213	}
1214
1215	sch_tree_lock(sch);
1216	hfsc_purge_queue(sch, cl);
1217	*old = cl->qdisc;
1218	cl->qdisc = new;
1219	sch_tree_unlock(sch);
1220	return 0;
1221}
1222
1223static struct Qdisc *
1224hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
1225{
1226	struct hfsc_class *cl = (struct hfsc_class *)arg;
1227
1228	if (cl->level == 0)
1229		return cl->qdisc;
1230
1231	return NULL;
1232}
1233
1234static void
1235hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
1236{
1237	struct hfsc_class *cl = (struct hfsc_class *)arg;
1238
1239	if (cl->qdisc->q.qlen == 0) {
1240		update_vf(cl, 0, 0);
1241		set_passive(cl);
1242	}
1243}
1244
1245static unsigned long
1246hfsc_get_class(struct Qdisc *sch, u32 classid)
1247{
1248	struct hfsc_class *cl = hfsc_find_class(classid, sch);
1249
1250	if (cl != NULL)
1251		cl->refcnt++;
1252
1253	return (unsigned long)cl;
1254}
1255
1256static void
1257hfsc_put_class(struct Qdisc *sch, unsigned long arg)
1258{
1259	struct hfsc_class *cl = (struct hfsc_class *)arg;
1260
1261	if (--cl->refcnt == 0)
1262		hfsc_destroy_class(sch, cl);
1263}
1264
1265static unsigned long
1266hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
1267{
1268	struct hfsc_class *p = (struct hfsc_class *)parent;
1269	struct hfsc_class *cl = hfsc_find_class(classid, sch);
1270
1271	if (cl != NULL) {
1272		if (p != NULL && p->level <= cl->level)
1273			return 0;
1274		cl->filter_cnt++;
1275	}
1276
1277	return (unsigned long)cl;
1278}
1279
1280static void
1281hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
1282{
1283	struct hfsc_class *cl = (struct hfsc_class *)arg;
1284
1285	cl->filter_cnt--;
1286}
1287
1288static struct tcf_proto **
1289hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
1290{
1291	struct hfsc_sched *q = qdisc_priv(sch);
1292	struct hfsc_class *cl = (struct hfsc_class *)arg;
1293
1294	if (cl == NULL)
1295		cl = &q->root;
1296
1297	return &cl->filter_list;
1298}
1299
1300static int
1301hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
1302{
1303	struct tc_service_curve tsc;
1304
1305	tsc.m1 = sm2m(sc->sm1);
1306	tsc.d  = dx2d(sc->dx);
1307	tsc.m2 = sm2m(sc->sm2);
1308	if (nla_put(skb, attr, sizeof(tsc), &tsc))
1309		goto nla_put_failure;
1310
1311	return skb->len;
1312
1313 nla_put_failure:
1314	return -1;
1315}
1316
1317static int
1318hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
1319{
1320	if ((cl->cl_flags & HFSC_RSC) &&
1321	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
1322		goto nla_put_failure;
1323
1324	if ((cl->cl_flags & HFSC_FSC) &&
1325	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
1326		goto nla_put_failure;
1327
1328	if ((cl->cl_flags & HFSC_USC) &&
1329	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
1330		goto nla_put_failure;
1331
1332	return skb->len;
1333
1334 nla_put_failure:
1335	return -1;
1336}
1337
1338static int
1339hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
1340		struct tcmsg *tcm)
1341{
1342	struct hfsc_class *cl = (struct hfsc_class *)arg;
1343	struct nlattr *nest;
1344
1345	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
1346					  TC_H_ROOT;
1347	tcm->tcm_handle = cl->cl_common.classid;
1348	if (cl->level == 0)
1349		tcm->tcm_info = cl->qdisc->handle;
1350
1351	nest = nla_nest_start(skb, TCA_OPTIONS);
1352	if (nest == NULL)
1353		goto nla_put_failure;
1354	if (hfsc_dump_curves(skb, cl) < 0)
1355		goto nla_put_failure;
1356	return nla_nest_end(skb, nest);
1357
1358 nla_put_failure:
1359	nla_nest_cancel(skb, nest);
1360	return -EMSGSIZE;
1361}
1362
1363static int
1364hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1365	struct gnet_dump *d)
1366{
1367	struct hfsc_class *cl = (struct hfsc_class *)arg;
1368	struct tc_hfsc_stats xstats;
1369
1370	cl->qstats.qlen = cl->qdisc->q.qlen;
1371	cl->qstats.backlog = cl->qdisc->qstats.backlog;
1372	xstats.level   = cl->level;
1373	xstats.period  = cl->cl_vtperiod;
1374	xstats.work    = cl->cl_total;
1375	xstats.rtwork  = cl->cl_cumul;
1376
1377	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1378	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
1379	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
1380		return -1;
1381
1382	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1383}
1384
1385
1386
1387static void
1388hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1389{
1390	struct hfsc_sched *q = qdisc_priv(sch);
1391	struct hfsc_class *cl;
1392	unsigned int i;
1393
1394	if (arg->stop)
1395		return;
1396
1397	for (i = 0; i < q->clhash.hashsize; i++) {
1398		hlist_for_each_entry(cl, &q->clhash.hash[i],
1399				     cl_common.hnode) {
1400			if (arg->count < arg->skip) {
1401				arg->count++;
1402				continue;
1403			}
1404			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1405				arg->stop = 1;
1406				return;
1407			}
1408			arg->count++;
1409		}
1410	}
1411}
1412
1413static void
1414hfsc_schedule_watchdog(struct Qdisc *sch)
1415{
1416	struct hfsc_sched *q = qdisc_priv(sch);
1417	struct hfsc_class *cl;
1418	u64 next_time = 0;
1419
1420	cl = eltree_get_minel(q);
1421	if (cl)
1422		next_time = cl->cl_e;
1423	if (q->root.cl_cfmin != 0) {
1424		if (next_time == 0 || next_time > q->root.cl_cfmin)
1425			next_time = q->root.cl_cfmin;
1426	}
1427	WARN_ON(next_time == 0);
1428	qdisc_watchdog_schedule(&q->watchdog, next_time);
1429}
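    /*
     * Note: this runs only after a dequeue attempt failed with
     * packets still queued, so either a future eligible time or the
     * root's cfmin should be pending; next_time == 0 would mean
     * neither criterion can ever fire again, hence the WARN_ON above.
     */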
1430
1431static int
1432hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1433{
1434	struct hfsc_sched *q = qdisc_priv(sch);
1435	struct tc_hfsc_qopt *qopt;
1436	int err;
1437
1438	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
1439		return -EINVAL;
1440	qopt = nla_data(opt);
1441
1442	q->defcls = qopt->defcls;
1443	err = qdisc_class_hash_init(&q->clhash);
1444	if (err < 0)
1445		return err;
1446	q->eligible = RB_ROOT;
1447	INIT_LIST_HEAD(&q->droplist);
1448
1449	q->root.cl_common.classid = sch->handle;
1450	q->root.refcnt  = 1;
1451	q->root.sched   = q;
1452	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1453					  sch->handle);
1454	if (q->root.qdisc == NULL)
1455		q->root.qdisc = &noop_qdisc;
1456	INIT_LIST_HEAD(&q->root.children);
1457	q->root.vt_tree = RB_ROOT;
1458	q->root.cf_tree = RB_ROOT;
1459
1460	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
1461	qdisc_class_hash_grow(sch, &q->clhash);
1462
1463	qdisc_watchdog_init(&q->watchdog, sch);
1464
1465	return 0;
1466}
1467
1468static int
1469hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
1470{
1471	struct hfsc_sched *q = qdisc_priv(sch);
1472	struct tc_hfsc_qopt *qopt;
1473
1474	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
1475		return -EINVAL;
1476	qopt = nla_data(opt);
1477
1478	sch_tree_lock(sch);
1479	q->defcls = qopt->defcls;
1480	sch_tree_unlock(sch);
1481
1482	return 0;
1483}
1484
1485static void
1486hfsc_reset_class(struct hfsc_class *cl)
1487{
1488	cl->cl_total        = 0;
1489	cl->cl_cumul        = 0;
1490	cl->cl_d            = 0;
1491	cl->cl_e            = 0;
1492	cl->cl_vt           = 0;
1493	cl->cl_vtadj        = 0;
1494	cl->cl_vtoff        = 0;
1495	cl->cl_cvtmin       = 0;
1496	cl->cl_cvtmax       = 0;
1497	cl->cl_cvtoff       = 0;
1498	cl->cl_pcvtoff      = 0;
1499	cl->cl_vtperiod     = 0;
1500	cl->cl_parentperiod = 0;
1501	cl->cl_f            = 0;
1502	cl->cl_myf          = 0;
1503	cl->cl_myfadj       = 0;
1504	cl->cl_cfmin        = 0;
1505	cl->cl_nactive      = 0;
1506
1507	cl->vt_tree = RB_ROOT;
1508	cl->cf_tree = RB_ROOT;
1509	qdisc_reset(cl->qdisc);
1510
1511	if (cl->cl_flags & HFSC_RSC)
1512		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
1513	if (cl->cl_flags & HFSC_FSC)
1514		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
1515	if (cl->cl_flags & HFSC_USC)
1516		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
1517}
1518
1519static void
1520hfsc_reset_qdisc(struct Qdisc *sch)
1521{
1522	struct hfsc_sched *q = qdisc_priv(sch);
1523	struct hfsc_class *cl;
1524	unsigned int i;
1525
1526	for (i = 0; i < q->clhash.hashsize; i++) {
1527		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1528			hfsc_reset_class(cl);
1529	}
1530	q->eligible = RB_ROOT;
1531	INIT_LIST_HEAD(&q->droplist);
1532	qdisc_watchdog_cancel(&q->watchdog);
1533	sch->q.qlen = 0;
1534}
1535
1536static void
1537hfsc_destroy_qdisc(struct Qdisc *sch)
1538{
1539	struct hfsc_sched *q = qdisc_priv(sch);
1540	struct hlist_node *next;
1541	struct hfsc_class *cl;
1542	unsigned int i;
1543
1544	for (i = 0; i < q->clhash.hashsize; i++) {
1545		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1546			tcf_destroy_chain(&cl->filter_list);
1547	}
1548	for (i = 0; i < q->clhash.hashsize; i++) {
1549		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1550					  cl_common.hnode)
1551			hfsc_destroy_class(sch, cl);
1552	}
1553	qdisc_class_hash_destroy(&q->clhash);
1554	qdisc_watchdog_cancel(&q->watchdog);
1555}
1556
1557static int
1558hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1559{
1560	struct hfsc_sched *q = qdisc_priv(sch);
1561	unsigned char *b = skb_tail_pointer(skb);
1562	struct tc_hfsc_qopt qopt;
1563	struct hfsc_class *cl;
1564	unsigned int i;
1565
1566	sch->qstats.backlog = 0;
1567	for (i = 0; i < q->clhash.hashsize; i++) {
1568		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1569			sch->qstats.backlog += cl->qdisc->qstats.backlog;
1570	}
1571
1572	qopt.defcls = q->defcls;
1573	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1574		goto nla_put_failure;
1575	return skb->len;
1576
1577 nla_put_failure:
1578	nlmsg_trim(skb, b);
1579	return -1;
1580}
1581
1582static int
1583hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1584{
1585	struct hfsc_class *cl;
1586	int uninitialized_var(err);
1587
1588	cl = hfsc_classify(skb, sch, &err);
1589	if (cl == NULL) {
1590		if (err & __NET_XMIT_BYPASS)
1591			sch->qstats.drops++;
1592		kfree_skb(skb);
1593		return err;
1594	}
1595
1596	err = qdisc_enqueue(skb, cl->qdisc);
1597	if (unlikely(err != NET_XMIT_SUCCESS)) {
1598		if (net_xmit_drop_count(err)) {
1599			cl->qstats.drops++;
1600			sch->qstats.drops++;
1601		}
1602		return err;
1603	}
1604
1605	if (cl->qdisc->q.qlen == 1)
1606		set_active(cl, qdisc_pkt_len(skb));
1607
1608	sch->q.qlen++;
1609
1610	return NET_XMIT_SUCCESS;
1611}
1612
1613static struct sk_buff *
1614hfsc_dequeue(struct Qdisc *sch)
1615{
1616	struct hfsc_sched *q = qdisc_priv(sch);
1617	struct hfsc_class *cl;
1618	struct sk_buff *skb;
1619	u64 cur_time;
1620	unsigned int next_len;
1621	int realtime = 0;
1622
1623	if (sch->q.qlen == 0)
1624		return NULL;
1625
1626	cur_time = psched_get_time();
1627
1628	/*
1629	 * if there are eligible classes, use real-time criteria.
1630	 * find the class with the minimum deadline among
1631	 * the eligible classes.
1632	 */
1633	cl = eltree_get_mindl(q, cur_time);
1634	if (cl) {
1635		realtime = 1;
1636	} else {
1637		/*
1638		 * use link-sharing criteria
1639		 * get the class with the minimum vt in the hierarchy
1640		 */
1641		cl = vttree_get_minvt(&q->root, cur_time);
1642		if (cl == NULL) {
1643			sch->qstats.overlimits++;
1644			hfsc_schedule_watchdog(sch);
1645			return NULL;
1646		}
1647	}
1648
1649	skb = qdisc_dequeue_peeked(cl->qdisc);
1650	if (skb == NULL) {
1651		qdisc_warn_nonwc("HFSC", cl->qdisc);
1652		return NULL;
1653	}
1654
1655	bstats_update(&cl->bstats, skb);
1656	update_vf(cl, qdisc_pkt_len(skb), cur_time);
1657	if (realtime)
1658		cl->cl_cumul += qdisc_pkt_len(skb);
1659
1660	if (cl->qdisc->q.qlen != 0) {
1661		if (cl->cl_flags & HFSC_RSC) {
1662			/* update ed */
1663			next_len = qdisc_peek_len(cl->qdisc);
1664			if (realtime)
1665				update_ed(cl, next_len);
1666			else
1667				update_d(cl, next_len);
1668		}
1669	} else {
1670		/* the class becomes passive */
1671		set_passive(cl);
1672	}
1673
1674	qdisc_unthrottled(sch);
1675	qdisc_bstats_update(sch, skb);
1676	sch->q.qlen--;
1677
1678	return skb;
1679}
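    /*
     * Note: cl_cumul is advanced only when the dequeue was selected
     * by the real-time criterion, so service received through
     * link-sharing does not push the deadline and eligible curves
     * forward; the real-time guarantee is accounted independently of
     * any excess bandwidth a class receives.
     */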
1680
1681static unsigned int
1682hfsc_drop(struct Qdisc *sch)
1683{
1684	struct hfsc_sched *q = qdisc_priv(sch);
1685	struct hfsc_class *cl;
1686	unsigned int len;
1687
1688	list_for_each_entry(cl, &q->droplist, dlist) {
1689		if (cl->qdisc->ops->drop != NULL &&
1690		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
1691			if (cl->qdisc->q.qlen == 0) {
1692				update_vf(cl, 0, 0);
1693				set_passive(cl);
1694			} else {
1695				list_move_tail(&cl->dlist, &q->droplist);
1696			}
1697			cl->qstats.drops++;
1698			sch->qstats.drops++;
1699			sch->q.qlen--;
1700			return len;
1701		}
1702	}
1703	return 0;
1704}
1705
1706static const struct Qdisc_class_ops hfsc_class_ops = {
1707	.change		= hfsc_change_class,
1708	.delete		= hfsc_delete_class,
1709	.graft		= hfsc_graft_class,
1710	.leaf		= hfsc_class_leaf,
1711	.qlen_notify	= hfsc_qlen_notify,
1712	.get		= hfsc_get_class,
1713	.put		= hfsc_put_class,
1714	.bind_tcf	= hfsc_bind_tcf,
1715	.unbind_tcf	= hfsc_unbind_tcf,
1716	.tcf_chain	= hfsc_tcf_chain,
1717	.dump		= hfsc_dump_class,
1718	.dump_stats	= hfsc_dump_class_stats,
1719	.walk		= hfsc_walk
1720};
1721
1722static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
1723	.id		= "hfsc",
1724	.init		= hfsc_init_qdisc,
1725	.change		= hfsc_change_qdisc,
1726	.reset		= hfsc_reset_qdisc,
1727	.destroy	= hfsc_destroy_qdisc,
1728	.dump		= hfsc_dump_qdisc,
1729	.enqueue	= hfsc_enqueue,
1730	.dequeue	= hfsc_dequeue,
1731	.peek		= qdisc_peek_dequeued,
1732	.drop		= hfsc_drop,
1733	.cl_ops		= &hfsc_class_ops,
1734	.priv_size	= sizeof(struct hfsc_sched),
1735	.owner		= THIS_MODULE
1736};
1737
1738static int __init
1739hfsc_init(void)
1740{
1741	return register_qdisc(&hfsc_qdisc_ops);
1742}
1743
1744static void __exit
1745hfsc_cleanup(void)
1746{
1747	unregister_qdisc(&hfsc_qdisc_ops);
1748}
1749
1750MODULE_LICENSE("GPL");
1751module_init(hfsc_init);
1752module_exit(hfsc_cleanup);