v3.1
 
   1/*
   2 * net/sched/sch_qfq.c         Quick Fair Queueing Scheduler.
   3 *
   4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * version 2 as published by the Free Software Foundation.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/bitops.h>
  14#include <linux/errno.h>
  15#include <linux/netdevice.h>
  16#include <linux/pkt_sched.h>
  17#include <net/sch_generic.h>
  18#include <net/pkt_sched.h>
  19#include <net/pkt_cls.h>
  20
  21
  22/*  Quick Fair Queueing
  23    ===================
  24
  25    Sources:
  26
  27    Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
  28    Packet Scheduling with Tight Bandwidth Distribution Guarantees."
  29
  30    See also:
  31    http://retis.sssup.it/~fabio/linux/qfq/
  32 */
  33
  34/*
  35
  36  Virtual time computations.
  37
  38  S, F and V are all computed in fixed point arithmetic with
  39  FRAC_BITS decimal bits.
  40
  41  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  42	one bit per index.
  43  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
  44
  45  The layout of the bits is as below:
  46
  47                   [ MTU_SHIFT ][      FRAC_BITS    ]
  48                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
  49				 ^.__grp->index = 0
  50				 *.__grp->slot_shift
  51
  52  where MIN_SLOT_SHIFT is derived by difference from the others.
  53
  54  The max group index corresponds to Lmax/w_min, where
  55  Lmax=1<<MTU_SHIFT, w_min = 1 .
  56  From this, and knowing how many groups (MAX_INDEX) we want,
  57  we can derive the shift corresponding to each group.
  58
  59  Because we often need to compute
  60	F = S + len/w_i  and V = V + len/wsum
  61  instead of storing w_i store the value
  62	inv_w = (1<<FRAC_BITS)/w_i
  63  so we can do F = S + len * inv_w * wsum.
  64  We use W_TOT in the formulas so we can easily move between
  65  static and adaptive weight sum.
  66
  67  The per-scheduler-instance data contain all the data structures
  68  for the scheduler: bitmaps and bucket lists.
  69
  70 */
  71
  72/*
  73 * Maximum number of consecutive slots occupied by backlogged classes
  74 * inside a group.
  75 */
  76#define QFQ_MAX_SLOTS	32
  77
  78/*
  79 * Shifts used for class<->group mapping.  We allow class weights that are
  80 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
  81 * group with the smallest index that can support the L_i / r_i configured
  82 * for the class.
  83 *
  84 * grp->index is the index of the group; and grp->slot_shift
  85 * is the shift for the corresponding (scaled) sigma_i.
  86 */
  87#define QFQ_MAX_INDEX		19
  88#define QFQ_MAX_WSHIFT		16
  89
  90#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
  91#define QFQ_MAX_WSUM		(2*QFQ_MAX_WEIGHT)
  92
  93#define FRAC_BITS		30	/* fixed point arithmetic */
  94#define ONE_FP			(1UL << FRAC_BITS)
  95#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)
  96
  97#define QFQ_MTU_SHIFT		11
  98#define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
  99
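/* A minimal userspace sketch (editor's illustration, not part of
 * sch_qfq.c; the EX_ prefix marks illustrative copies of the macros
 * above) showing the fixed-point arithmetic in action:
 * QFQ_MIN_SLOT_SHIFT works out to 30 + 11 - 19 = 22, and a finish-time
 * update F = S + len * inv_w recovers len/w once the FRAC_BITS scaling
 * is shifted away.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_FRAC_BITS	30
#define EX_ONE_FP	(1ULL << EX_FRAC_BITS)
#define EX_MTU_SHIFT	11
#define EX_MAX_INDEX	19

int main(void)
{
	uint32_t w = 4;				/* class weight */
	uint32_t inv_w = EX_ONE_FP / w;		/* stored instead of w */
	uint64_t S = 0, len = 1500;
	uint64_t F = S + len * (uint64_t)inv_w;	/* F = S + len/w, scaled */

	printf("MIN_SLOT_SHIFT = %d\n",
	       EX_FRAC_BITS + EX_MTU_SHIFT - EX_MAX_INDEX);	/* 22 */
	printf("inv_w = %u, len/w = %llu\n", (unsigned)inv_w,
	       (unsigned long long)(F >> EX_FRAC_BITS));	/* 375 */
	return 0;
}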
 100/*
 101 * Possible group states.  These values are used as indexes for the bitmaps
 102 * array of struct qfq_queue.
 103 */
 104enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
 105
 106struct qfq_group;
 107
 108struct qfq_class {
 109	struct Qdisc_class_common common;
 110
 111	unsigned int refcnt;
 112	unsigned int filter_cnt;
 113
 114	struct gnet_stats_basic_packed bstats;
 115	struct gnet_stats_queue qstats;
 116	struct gnet_stats_rate_est rate_est;
 117	struct Qdisc *qdisc;
 118
 119	struct hlist_node next;	/* Link for the slot list. */
 120	u64 S, F;		/* flow timestamps (exact) */
 121
 122	/* group we belong to. In principle we would need the index,
 123	 * which is log_2(lmax/weight), but we never reference it
 124	 * directly, only the group.
 125	 */
 126	struct qfq_group *grp;
 127
 128	/* these are copied from the flowset. */
 129	u32	inv_w;		/* ONE_FP/weight */
 130	u32	lmax;		/* Max packet size for this flow. */
 131};
 132
 133struct qfq_group {
 134	u64 S, F;			/* group timestamps (approx). */
 135	unsigned int slot_shift;	/* Slot shift. */
 136	unsigned int index;		/* Group index. */
 137	unsigned int front;		/* Index of the front slot. */
 138	unsigned long full_slots;	/* non-empty slots */
 139
 140	/* Array of RR lists of active classes. */
 141	struct hlist_head slots[QFQ_MAX_SLOTS];
 142};
 143
 144struct qfq_sched {
 145	struct tcf_proto *filter_list;
 146	struct Qdisc_class_hash clhash;
 147
 148	u64		V;		/* Precise virtual time. */
 149	u32		wsum;		/* weight sum */
 150
 151	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
 152	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
 153};
 154
 155static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 156{
 157	struct qfq_sched *q = qdisc_priv(sch);
 158	struct Qdisc_class_common *clc;
 159
 160	clc = qdisc_class_find(&q->clhash, classid);
 161	if (clc == NULL)
 162		return NULL;
 163	return container_of(clc, struct qfq_class, common);
 164}
 165
 166static void qfq_purge_queue(struct qfq_class *cl)
 167{
 168	unsigned int len = cl->qdisc->q.qlen;
 169
 170	qdisc_reset(cl->qdisc);
 171	qdisc_tree_decrease_qlen(cl->qdisc, len);
 172}
 173
 174static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
 175	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
 176	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
 177};
 178
 179/*
 180 * Calculate a flow index, given its weight and maximum packet length.
 181 * index = log_2(maxlen/weight) but we need to apply the scaling.
 182 * This is used only once at flow creation.
 183 */
 184static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
 185{
 186	u64 slot_size = (u64)maxlen * inv_w;
 187	unsigned long size_map;
 188	int index = 0;
 189
 190	size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
 191	if (!size_map)
 192		goto out;
 193
 194	index = __fls(size_map) + 1;	/* basically a log_2 */
 195	index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
 196
 197	if (index < 0)
 198		index = 0;
 199out:
 200	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
 201		 (unsigned long) ONE_FP/inv_w, maxlen, index);
 202
 203	return index;
 204}
 205
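/* Worked instances of qfq_calc_index() above, re-run as a standalone
 * userspace sketch (editor's illustration; ex_calc_index is an
 * illustrative copy, not kernel code) with QFQ_MIN_SLOT_SHIFT = 22
 * baked in and __builtin_clzll standing in for __fls: a weight-1 flow
 * with maxlen 2048 has slot_size = 2^41 and size_map = 2^19, so it
 * lands in the last group (19); the maximum weight 2^16 with the same
 * maxlen lands in group 3.
 */
#include <stdio.h>
#include <stdint.h>

static int ex_calc_index(uint32_t inv_w, unsigned int maxlen)
{
	uint64_t slot_size = (uint64_t)maxlen * inv_w;
	uint64_t size_map = slot_size >> 22;	/* QFQ_MIN_SLOT_SHIFT */
	int index = 0;

	if (size_map) {
		index = 63 - __builtin_clzll(size_map) + 1;	/* __fls + 1 */
		/* slot_size an exact power of two: step back one group */
		index -= !(slot_size - (1ULL << (index + 22 - 1)));
		if (index < 0)
			index = 0;
	}
	return index;
}

int main(void)
{
	uint32_t one_fp = 1UL << 30;

	printf("w = 1     -> group %d\n", ex_calc_index(one_fp, 2048));
	printf("w = 65536 -> group %d\n", ex_calc_index(one_fp >> 16, 2048));
	return 0;
}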
 206static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 207			    struct nlattr **tca, unsigned long *arg)
 208{
 209	struct qfq_sched *q = qdisc_priv(sch);
 210	struct qfq_class *cl = (struct qfq_class *)*arg;
 211	struct nlattr *tb[TCA_QFQ_MAX + 1];
 212	u32 weight, lmax, inv_w;
 213	int i, err;
 214
 215	if (tca[TCA_OPTIONS] == NULL) {
 216		pr_notice("qfq: no options\n");
 217		return -EINVAL;
 218	}
 219
 220	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
 221	if (err < 0)
 222		return err;
 223
 224	if (tb[TCA_QFQ_WEIGHT]) {
 225		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
 226		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
 227			pr_notice("qfq: invalid weight %u\n", weight);
 228			return -EINVAL;
 229		}
 230	} else
 231		weight = 1;
 232
 233	inv_w = ONE_FP / weight;
 234	weight = ONE_FP / inv_w;
 235	if (q->wsum + weight > QFQ_MAX_WSUM) {
 236		pr_notice("qfq: total weight out of range (%u + %u)\n",
 237			  weight, q->wsum);
 238		return -EINVAL;
 239	}
 240
 241	if (tb[TCA_QFQ_LMAX]) {
 242		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
 243		if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
 244			pr_notice("qfq: invalid max length %u\n", lmax);
 245			return -EINVAL;
 246		}
 247	} else
 248		lmax = 1UL << QFQ_MTU_SHIFT;
 249
 250	if (cl != NULL) {
 251		if (tca[TCA_RATE]) {
 252			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 253						    qdisc_root_sleeping_lock(sch),
 254						    tca[TCA_RATE]);
 255			if (err)
 256				return err;
 257		}
 258
 259		sch_tree_lock(sch);
 260		if (tb[TCA_QFQ_WEIGHT]) {
  261			q->wsum = weight + q->wsum - ONE_FP / cl->inv_w;
 262			cl->inv_w = inv_w;
 263		}
 264		sch_tree_unlock(sch);
 265
 266		return 0;
 267	}
 268
 269	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
 270	if (cl == NULL)
 271		return -ENOBUFS;
 272
 273	cl->refcnt = 1;
 274	cl->common.classid = classid;
 275	cl->lmax = lmax;
 276	cl->inv_w = inv_w;
 277	i = qfq_calc_index(cl->inv_w, cl->lmax);
 278
 279	cl->grp = &q->groups[i];
 280	q->wsum += weight;
 281
 282	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 283				      &pfifo_qdisc_ops, classid);
 284	if (cl->qdisc == NULL)
 285		cl->qdisc = &noop_qdisc;
 286
 287	if (tca[TCA_RATE]) {
 288		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
 289					qdisc_root_sleeping_lock(sch),
 290					tca[TCA_RATE]);
 291		if (err) {
 292			qdisc_destroy(cl->qdisc);
 293			kfree(cl);
 294			return err;
 295		}
 296	}
 297
 298	sch_tree_lock(sch);
 299	qdisc_class_hash_insert(&q->clhash, &cl->common);
 300	sch_tree_unlock(sch);
 301
 302	qdisc_class_hash_grow(sch, &q->clhash);
 303
 304	*arg = (unsigned long)cl;
 305	return 0;
 306}
 307
 308static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
 309{
 310	struct qfq_sched *q = qdisc_priv(sch);
 311
 312	if (cl->inv_w) {
 313		q->wsum -= ONE_FP / cl->inv_w;
 314		cl->inv_w = 0;
 315	}
 316
 317	gen_kill_estimator(&cl->bstats, &cl->rate_est);
 318	qdisc_destroy(cl->qdisc);
 319	kfree(cl);
 320}
 321
 322static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 323{
 324	struct qfq_sched *q = qdisc_priv(sch);
 325	struct qfq_class *cl = (struct qfq_class *)arg;
 326
 327	if (cl->filter_cnt > 0)
 328		return -EBUSY;
 329
 330	sch_tree_lock(sch);
 331
 332	qfq_purge_queue(cl);
 333	qdisc_class_hash_remove(&q->clhash, &cl->common);
 334
 335	BUG_ON(--cl->refcnt == 0);
 336	/*
 337	 * This shouldn't happen: we "hold" one cops->get() when called
 338	 * from tc_ctl_tclass; the destroy method is done from cops->put().
 339	 */
 340
 341	sch_tree_unlock(sch);
 342	return 0;
 343}
 344
 345static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
 346{
 347	struct qfq_class *cl = qfq_find_class(sch, classid);
 348
 349	if (cl != NULL)
 350		cl->refcnt++;
 351
 352	return (unsigned long)cl;
 353}
 354
 355static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
 356{
 357	struct qfq_class *cl = (struct qfq_class *)arg;
 358
 359	if (--cl->refcnt == 0)
 360		qfq_destroy_class(sch, cl);
 361}
 362
 363static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
 364{
 365	struct qfq_sched *q = qdisc_priv(sch);
 366
 367	if (cl)
 368		return NULL;
 369
 370	return &q->filter_list;
 371}
 372
 373static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
 374				  u32 classid)
 375{
 376	struct qfq_class *cl = qfq_find_class(sch, classid);
 377
 378	if (cl != NULL)
 379		cl->filter_cnt++;
 380
 381	return (unsigned long)cl;
 382}
 383
 384static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 385{
 386	struct qfq_class *cl = (struct qfq_class *)arg;
 387
 388	cl->filter_cnt--;
 389}
 390
 391static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
 392			   struct Qdisc *new, struct Qdisc **old)
 393{
 394	struct qfq_class *cl = (struct qfq_class *)arg;
 395
 396	if (new == NULL) {
 397		new = qdisc_create_dflt(sch->dev_queue,
 398					&pfifo_qdisc_ops, cl->common.classid);
 399		if (new == NULL)
 400			new = &noop_qdisc;
 401	}
 402
 403	sch_tree_lock(sch);
 404	qfq_purge_queue(cl);
 405	*old = cl->qdisc;
 406	cl->qdisc = new;
 407	sch_tree_unlock(sch);
 408	return 0;
 409}
 410
 411static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
 412{
 413	struct qfq_class *cl = (struct qfq_class *)arg;
 414
 415	return cl->qdisc;
 416}
 417
 418static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
 419			  struct sk_buff *skb, struct tcmsg *tcm)
 420{
 421	struct qfq_class *cl = (struct qfq_class *)arg;
 422	struct nlattr *nest;
 423
 424	tcm->tcm_parent	= TC_H_ROOT;
 425	tcm->tcm_handle	= cl->common.classid;
 426	tcm->tcm_info	= cl->qdisc->handle;
 427
 428	nest = nla_nest_start(skb, TCA_OPTIONS);
 429	if (nest == NULL)
 430		goto nla_put_failure;
 431	NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
 432	NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
 433	return nla_nest_end(skb, nest);
 434
 435nla_put_failure:
 436	nla_nest_cancel(skb, nest);
 437	return -EMSGSIZE;
 438}
 439
 440static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 441				struct gnet_dump *d)
 442{
 443	struct qfq_class *cl = (struct qfq_class *)arg;
 444	struct tc_qfq_stats xstats;
 445
 446	memset(&xstats, 0, sizeof(xstats));
 447	cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
 448
 449	xstats.weight = ONE_FP/cl->inv_w;
 450	xstats.lmax = cl->lmax;
 451
 452	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 453	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
 454	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
 455		return -1;
 456
 457	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
 458}
 459
 460static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 461{
 462	struct qfq_sched *q = qdisc_priv(sch);
 463	struct qfq_class *cl;
 464	struct hlist_node *n;
 465	unsigned int i;
 466
 467	if (arg->stop)
 468		return;
 469
 470	for (i = 0; i < q->clhash.hashsize; i++) {
 471		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
 472			if (arg->count < arg->skip) {
 473				arg->count++;
 474				continue;
 475			}
 476			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
 477				arg->stop = 1;
 478				return;
 479			}
 480			arg->count++;
 481		}
 482	}
 483}
 484
 485static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 486				      int *qerr)
 487{
 488	struct qfq_sched *q = qdisc_priv(sch);
 489	struct qfq_class *cl;
 490	struct tcf_result res;
 491	int result;
 492
 493	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
 494		pr_debug("qfq_classify: found %d\n", skb->priority);
 495		cl = qfq_find_class(sch, skb->priority);
 496		if (cl != NULL)
 497			return cl;
 498	}
 499
 500	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 501	result = tc_classify(skb, q->filter_list, &res);
 502	if (result >= 0) {
 503#ifdef CONFIG_NET_CLS_ACT
 504		switch (result) {
 505		case TC_ACT_QUEUED:
 506		case TC_ACT_STOLEN:
 507			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 508		case TC_ACT_SHOT:
 509			return NULL;
 510		}
 511#endif
 512		cl = (struct qfq_class *)res.class;
 513		if (cl == NULL)
 514			cl = qfq_find_class(sch, res.classid);
 515		return cl;
 516	}
 517
 518	return NULL;
 519}
 520
 521/* Generic comparison function, handling wraparound. */
 522static inline int qfq_gt(u64 a, u64 b)
 523{
 524	return (s64)(a - b) > 0;
 525}
 526
 527/* Round a precise timestamp to its slotted value. */
 528static inline u64 qfq_round_down(u64 ts, unsigned int shift)
 529{
 530	return ts & ~((1ULL << shift) - 1);
 531}
 532
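/* A minimal userspace check (editor's illustration, not kernel code) of
 * the two helpers above: the signed reinterpretation in qfq_gt() keeps
 * "later than" correct even after the u64 virtual clock wraps, and
 * rounding down to a slot boundary simply clears the low 'shift' bits.
 */
#include <assert.h>
#include <stdint.h>

static int ex_gt(uint64_t a, uint64_t b) { return (int64_t)(a - b) > 0; }

int main(void)
{
	/* after wraparound, 3 is still "later" than 2^64 - 2 */
	assert(ex_gt(3, UINT64_MAX - 1) && !ex_gt(UINT64_MAX - 1, 3));
	/* rounding to a 22-bit slot keeps only the high bits */
	assert((0x12345678ULL & ~((1ULL << 22) - 1)) == 0x12000000ULL);
	return 0;
}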
 533/* return the pointer to the group with lowest index in the bitmap */
 534static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
 535					unsigned long bitmap)
 536{
 537	int index = __ffs(bitmap);
 538	return &q->groups[index];
 539}
 540/* Calculate a mask to mimic what would be ffs_from(). */
 541static inline unsigned long mask_from(unsigned long bitmap, int from)
 542{
 543	return bitmap & ~((1UL << from) - 1);
 544}
 545
 546/*
 547 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 548 * First compute eligibility comparing grp->S, q->V,
 549 * then check if someone is blocking us and possibly add EB
 550 */
 551static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
 552{
 553	/* if S > V we are not eligible */
 554	unsigned int state = qfq_gt(grp->S, q->V);
 555	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
 556	struct qfq_group *next;
 557
 558	if (mask) {
 559		next = qfq_ffs(q, mask);
 560		if (qfq_gt(grp->F, next->F))
 561			state |= EB;
 562	}
 563
 564	return state;
 565}
 566
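/* Editor's note: the encoding above packs two independent conditions
 * into the state. Bit 0 is ineligibility (grp->S > q->V) and bit 1 is
 * blocking by an earlier-finishing ER group:
 *
 *	eligible,   unblocked  -> ER = 0
 *	ineligible, unblocked  -> IR = 1
 *	eligible,   blocked    -> EB = 2
 *	ineligible, blocked    -> IB = 3
 */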
 567
 568/*
 569 * In principle
 570 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 571 *	q->bitmaps[src] &= ~mask;
 572 * but we should make sure that src != dst
 573 */
 574static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
 575				   int src, int dst)
 576{
 577	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 578	q->bitmaps[src] &= ~mask;
 579}
 580
 581static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
 582{
 583	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
 584	struct qfq_group *next;
 585
 586	if (mask) {
 587		next = qfq_ffs(q, mask);
 588		if (!qfq_gt(next->F, old_F))
 589			return;
 590	}
 591
 592	mask = (1UL << index) - 1;
 593	qfq_move_groups(q, mask, EB, ER);
 594	qfq_move_groups(q, mask, IB, IR);
 595}
 596
 597/*
 598 * perhaps
 599 *
 600	old_V ^= q->V;
 601	old_V >>= QFQ_MIN_SLOT_SHIFT;
 602	if (old_V) {
 603		...
 604	}
 605 *
 606 */
 607static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
 608{
 609	unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
 610	unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
 611
 612	if (vslot != old_vslot) {
 613		unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
 614		qfq_move_groups(q, mask, IR, ER);
 615		qfq_move_groups(q, mask, IB, EB);
 616	}
 617}
 618
 619
 620/*
 621 * XXX we should make sure that slot becomes less than 32.
 622 * This is guaranteed by the input values.
 623 * roundedS is always cl->S rounded on grp->slot_shift bits.
 624 */
 625static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
 626			    u64 roundedS)
 627{
 628	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
 629	unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
 630
 631	hlist_add_head(&cl->next, &grp->slots[i]);
 632	__set_bit(slot, &grp->full_slots);
 633}
 634
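/* Editor's note, a worked example for qfq_slot_insert() above: the slot
 * index is the distance, in units of 2^slot_shift virtual time, between
 * the class's rounded start and the group's start, and the bucket array
 * is circular. With grp->S = 0, slot_shift = 22 and front = 30:
 *
 *	slot = ((3ULL << 22) - 0) >> 22 = 3;
 *	i    = (30 + 3) % QFQ_MAX_SLOTS = 1;	// wraps past the end
 *
 * so logical slot 3 lives in physical bucket 1.
 */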
 635/* Maybe introduce hlist_first_entry?? */
 636static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
 637{
 638	return hlist_entry(grp->slots[grp->front].first,
 639			   struct qfq_class, next);
 640}
 641
 642/*
 643 * remove the entry from the slot
 644 */
 645static void qfq_front_slot_remove(struct qfq_group *grp)
 646{
 647	struct qfq_class *cl = qfq_slot_head(grp);
 648
 649	BUG_ON(!cl);
 650	hlist_del(&cl->next);
 651	if (hlist_empty(&grp->slots[grp->front]))
 652		__clear_bit(0, &grp->full_slots);
 653}
 654
 655/*
 656 * Returns the first full queue in a group. As a side effect,
 657 * adjust the bucket list so the first non-empty bucket is at
 658 * position 0 in full_slots.
 659 */
 660static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
 661{
 662	unsigned int i;
 663
 664	pr_debug("qfq slot_scan: grp %u full %#lx\n",
 665		 grp->index, grp->full_slots);
 666
 667	if (grp->full_slots == 0)
 668		return NULL;
 669
 670	i = __ffs(grp->full_slots);  /* zero based */
 671	if (i > 0) {
 672		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
 673		grp->full_slots >>= i;
 674	}
 675
 676	return qfq_slot_head(grp);
 677}
 678
 679/*
 680 * adjust the bucket list. When the start time of a group decreases,
 681 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 682 * move the objects. The mask of occupied slots must be shifted
 683 * because we use ffs() to find the first non-empty slot.
 684 * This covers decreases in the group's start time, but what about
  685 * increases of the start time?
 686 * Here too we should make sure that i is less than 32
 687 */
 688static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
 689{
 690	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
 691
 692	grp->full_slots <<= i;
 693	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
 694}
 695
 696static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
 697{
 698	struct qfq_group *grp;
 699	unsigned long ineligible;
 700
 701	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
 702	if (ineligible) {
 703		if (!q->bitmaps[ER]) {
 704			grp = qfq_ffs(q, ineligible);
 705			if (qfq_gt(grp->S, q->V))
 706				q->V = grp->S;
 707		}
 708		qfq_make_eligible(q, old_V);
 709	}
 710}
 711
  712/* Return the length of the next packet in the queue (0 if empty) */
 713static unsigned int qdisc_peek_len(struct Qdisc *sch)
 714{
 715	struct sk_buff *skb;
 716
 717	skb = sch->ops->peek(sch);
 718	return skb ? qdisc_pkt_len(skb) : 0;
 719}
 720
 721/*
 722 * Updates the class, returns true if also the group needs to be updated.
 723 */
 724static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
 725{
 726	unsigned int len = qdisc_peek_len(cl->qdisc);
 727
 728	cl->S = cl->F;
 729	if (!len)
 730		qfq_front_slot_remove(grp);	/* queue is empty */
 731	else {
 732		u64 roundedS;
 733
 734		cl->F = cl->S + (u64)len * cl->inv_w;
 735		roundedS = qfq_round_down(cl->S, grp->slot_shift);
 736		if (roundedS == grp->S)
 737			return false;
 738
 739		qfq_front_slot_remove(grp);
 740		qfq_slot_insert(grp, cl, roundedS);
 741	}
 742
 743	return true;
 744}
 745
 746static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 747{
 748	struct qfq_sched *q = qdisc_priv(sch);
 749	struct qfq_group *grp;
 750	struct qfq_class *cl;
 751	struct sk_buff *skb;
 752	unsigned int len;
 753	u64 old_V;
 754
 755	if (!q->bitmaps[ER])
 756		return NULL;
 757
 758	grp = qfq_ffs(q, q->bitmaps[ER]);
 759
 760	cl = qfq_slot_head(grp);
 761	skb = qdisc_dequeue_peeked(cl->qdisc);
 762	if (!skb) {
 763		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
 764		return NULL;
 765	}
 766
 767	sch->q.qlen--;
 768	qdisc_bstats_update(sch, skb);
 769
 770	old_V = q->V;
 771	len = qdisc_pkt_len(skb);
 772	q->V += (u64)len * IWSUM;
 773	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
 774		 len, (unsigned long long) cl->F, (unsigned long long) q->V);
 775
 776	if (qfq_update_class(grp, cl)) {
 777		u64 old_F = grp->F;
 778
 779		cl = qfq_slot_scan(grp);
 780		if (!cl)
 781			__clear_bit(grp->index, &q->bitmaps[ER]);
 782		else {
 783			u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
 784			unsigned int s;
 785
 786			if (grp->S == roundedS)
 787				goto skip_unblock;
 788			grp->S = roundedS;
 789			grp->F = roundedS + (2ULL << grp->slot_shift);
 790			__clear_bit(grp->index, &q->bitmaps[ER]);
 791			s = qfq_calc_state(q, grp);
 792			__set_bit(grp->index, &q->bitmaps[s]);
 793		}
 794
 795		qfq_unblock_groups(q, grp->index, old_F);
 796	}
 797
 798skip_unblock:
 799	qfq_update_eligible(q, old_V);
 800
 801	return skb;
 802}
 803
 804/*
 805 * Assign a reasonable start time for a new flow k in group i.
 806 * Admissible values for \hat(F) are multiples of \sigma_i
 807 * no greater than V+\sigma_i . Larger values mean that
 808 * we had a wraparound so we consider the timestamp to be stale.
 809 *
 810 * If F is not stale and F >= V then we set S = F.
 811 * Otherwise we should assign S = V, but this may violate
 812 * the ordering in ER. So, if we have groups in ER, set S to
 813 * the F_j of the first group j which would be blocking us.
 814 * We are guaranteed not to move S backward because
 815 * otherwise our group i would still be blocked.
 816 */
 817static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 818{
 819	unsigned long mask;
 820	uint32_t limit, roundedF;
 821	int slot_shift = cl->grp->slot_shift;
 822
 823	roundedF = qfq_round_down(cl->F, slot_shift);
 824	limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
 825
 826	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
 827		/* timestamp was stale */
 828		mask = mask_from(q->bitmaps[ER], cl->grp->index);
 829		if (mask) {
 830			struct qfq_group *next = qfq_ffs(q, mask);
 831			if (qfq_gt(roundedF, next->F)) {
 832				cl->S = next->F;
 833				return;
 834			}
 835		}
 836		cl->S = q->V;
 837	} else  /* timestamp is not stale */
 838		cl->S = cl->F;
 839}
 840
 841static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 842{
 843	struct qfq_sched *q = qdisc_priv(sch);
 844	struct qfq_group *grp;
 845	struct qfq_class *cl;
 846	int err;
 847	u64 roundedS;
 848	int s;
 849
 850	cl = qfq_classify(skb, sch, &err);
 851	if (cl == NULL) {
 852		if (err & __NET_XMIT_BYPASS)
 853			sch->qstats.drops++;
 854		kfree_skb(skb);
 855		return err;
 856	}
 857	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 858
 859	err = qdisc_enqueue(skb, cl->qdisc);
 860	if (unlikely(err != NET_XMIT_SUCCESS)) {
 861		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
 862		if (net_xmit_drop_count(err)) {
 863			cl->qstats.drops++;
 864			sch->qstats.drops++;
 865		}
 866		return err;
 867	}
 868
 869	bstats_update(&cl->bstats, skb);
 870	++sch->q.qlen;
 871
 872	/* If the new skb is not the head of queue, then done here. */
 873	if (cl->qdisc->q.qlen != 1)
 874		return err;
 875
  876	/* If we reach this point, queue q was idle */
 877	grp = cl->grp;
 878	qfq_update_start(q, cl);
 879
 880	/* compute new finish time and rounded start. */
 881	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
 882	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 883
 884	/*
 885	 * insert cl in the correct bucket.
 886	 * If cl->S >= grp->S we don't need to adjust the
 887	 * bucket list and simply go to the insertion phase.
 888	 * Otherwise grp->S is decreasing, we must make room
 889	 * in the bucket list, and also recompute the group state.
 890	 * Finally, if there were no flows in this group and nobody
 891	 * was in ER make sure to adjust V.
 892	 */
 893	if (grp->full_slots) {
 894		if (!qfq_gt(grp->S, cl->S))
 895			goto skip_update;
 896
 897		/* create a slot for this cl->S */
 898		qfq_slot_rotate(grp, roundedS);
 899		/* group was surely ineligible, remove */
 900		__clear_bit(grp->index, &q->bitmaps[IR]);
 901		__clear_bit(grp->index, &q->bitmaps[IB]);
 902	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
 903		q->V = roundedS;
 904
 905	grp->S = roundedS;
 906	grp->F = roundedS + (2ULL << grp->slot_shift);
 907	s = qfq_calc_state(q, grp);
 908	__set_bit(grp->index, &q->bitmaps[s]);
 909
 910	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
 911		 s, q->bitmaps[s],
 912		 (unsigned long long) cl->S,
 913		 (unsigned long long) cl->F,
 914		 (unsigned long long) q->V);
 915
 916skip_update:
 917	qfq_slot_insert(grp, cl, roundedS);
 918
 919	return err;
 920}
 921
 922
 923static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
 924			    struct qfq_class *cl)
 925{
 926	unsigned int i, offset;
 927	u64 roundedS;
 928
 929	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 930	offset = (roundedS - grp->S) >> grp->slot_shift;
 931	i = (grp->front + offset) % QFQ_MAX_SLOTS;
 932
 933	hlist_del(&cl->next);
 934	if (hlist_empty(&grp->slots[i]))
 935		__clear_bit(offset, &grp->full_slots);
 936}
 937
 938/*
 939 * called to forcibly destroy a queue.
 940 * If the queue is not in the front bucket, or if it has
 941 * other queues in the front bucket, we can simply remove
 942 * the queue with no other side effects.
 943 * Otherwise we must propagate the event up.
 944 */
 945static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
 946{
 947	struct qfq_group *grp = cl->grp;
 948	unsigned long mask;
 949	u64 roundedS;
 950	int s;
 951
 952	cl->F = cl->S;
 953	qfq_slot_remove(q, grp, cl);
 954
 955	if (!grp->full_slots) {
 956		__clear_bit(grp->index, &q->bitmaps[IR]);
 957		__clear_bit(grp->index, &q->bitmaps[EB]);
 958		__clear_bit(grp->index, &q->bitmaps[IB]);
 959
 960		if (test_bit(grp->index, &q->bitmaps[ER]) &&
 961		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
 962			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
 963			if (mask)
 964				mask = ~((1UL << __fls(mask)) - 1);
 965			else
 966				mask = ~0UL;
 967			qfq_move_groups(q, mask, EB, ER);
 968			qfq_move_groups(q, mask, IB, IR);
 969		}
 970		__clear_bit(grp->index, &q->bitmaps[ER]);
 971	} else if (hlist_empty(&grp->slots[grp->front])) {
 972		cl = qfq_slot_scan(grp);
 973		roundedS = qfq_round_down(cl->S, grp->slot_shift);
 974		if (grp->S != roundedS) {
 975			__clear_bit(grp->index, &q->bitmaps[ER]);
 976			__clear_bit(grp->index, &q->bitmaps[IR]);
 977			__clear_bit(grp->index, &q->bitmaps[EB]);
 978			__clear_bit(grp->index, &q->bitmaps[IB]);
 979			grp->S = roundedS;
 980			grp->F = roundedS + (2ULL << grp->slot_shift);
 981			s = qfq_calc_state(q, grp);
 982			__set_bit(grp->index, &q->bitmaps[s]);
 983		}
 984	}
 985
 986	qfq_update_eligible(q, q->V);
 987}
 988
 989static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
 990{
 991	struct qfq_sched *q = qdisc_priv(sch);
 992	struct qfq_class *cl = (struct qfq_class *)arg;
 993
 994	if (cl->qdisc->q.qlen == 0)
 995		qfq_deactivate_class(q, cl);
 996}
 997
 998static unsigned int qfq_drop(struct Qdisc *sch)
 999{
1000	struct qfq_sched *q = qdisc_priv(sch);
1001	struct qfq_group *grp;
1002	unsigned int i, j, len;
1003
1004	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1005		grp = &q->groups[i];
1006		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1007			struct qfq_class *cl;
1008			struct hlist_node *n;
1009
1010			hlist_for_each_entry(cl, n, &grp->slots[j], next) {
1011
1012				if (!cl->qdisc->ops->drop)
1013					continue;
1014
1015				len = cl->qdisc->ops->drop(cl->qdisc);
1016				if (len > 0) {
1017					sch->q.qlen--;
1018					if (!cl->qdisc->q.qlen)
1019						qfq_deactivate_class(q, cl);
1020
1021					return len;
1022				}
1023			}
1024		}
1025	}
1026
1027	return 0;
1028}
1029
1030static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1031{
1032	struct qfq_sched *q = qdisc_priv(sch);
1033	struct qfq_group *grp;
1034	int i, j, err;
1035
1036	err = qdisc_class_hash_init(&q->clhash);
1037	if (err < 0)
1038		return err;
1039
1040	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1041		grp = &q->groups[i];
1042		grp->index = i;
1043		grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
1044				   - (QFQ_MAX_INDEX - i);
1045		for (j = 0; j < QFQ_MAX_SLOTS; j++)
1046			INIT_HLIST_HEAD(&grp->slots[j]);
1047	}
1048
1049	return 0;
1050}
1051
1052static void qfq_reset_qdisc(struct Qdisc *sch)
1053{
1054	struct qfq_sched *q = qdisc_priv(sch);
1055	struct qfq_group *grp;
1056	struct qfq_class *cl;
1057	struct hlist_node *n, *tmp;
1058	unsigned int i, j;
1059
1060	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1061		grp = &q->groups[i];
1062		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1063			hlist_for_each_entry_safe(cl, n, tmp,
1064						  &grp->slots[j], next) {
1065				qfq_deactivate_class(q, cl);
1066			}
1067		}
1068	}
1069
1070	for (i = 0; i < q->clhash.hashsize; i++) {
1071		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1072			qdisc_reset(cl->qdisc);
1073	}
1074	sch->q.qlen = 0;
1075}
1076
1077static void qfq_destroy_qdisc(struct Qdisc *sch)
1078{
1079	struct qfq_sched *q = qdisc_priv(sch);
1080	struct qfq_class *cl;
1081	struct hlist_node *n, *next;
1082	unsigned int i;
1083
1084	tcf_destroy_chain(&q->filter_list);
1085
1086	for (i = 0; i < q->clhash.hashsize; i++) {
1087		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1088					  common.hnode) {
1089			qfq_destroy_class(sch, cl);
1090		}
1091	}
1092	qdisc_class_hash_destroy(&q->clhash);
1093}
1094
1095static const struct Qdisc_class_ops qfq_class_ops = {
1096	.change		= qfq_change_class,
1097	.delete		= qfq_delete_class,
1098	.get		= qfq_get_class,
1099	.put		= qfq_put_class,
1100	.tcf_chain	= qfq_tcf_chain,
1101	.bind_tcf	= qfq_bind_tcf,
1102	.unbind_tcf	= qfq_unbind_tcf,
1103	.graft		= qfq_graft_class,
1104	.leaf		= qfq_class_leaf,
1105	.qlen_notify	= qfq_qlen_notify,
1106	.dump		= qfq_dump_class,
1107	.dump_stats	= qfq_dump_class_stats,
1108	.walk		= qfq_walk,
1109};
1110
1111static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1112	.cl_ops		= &qfq_class_ops,
1113	.id		= "qfq",
1114	.priv_size	= sizeof(struct qfq_sched),
1115	.enqueue	= qfq_enqueue,
1116	.dequeue	= qfq_dequeue,
1117	.peek		= qdisc_peek_dequeued,
1118	.drop		= qfq_drop,
1119	.init		= qfq_init_qdisc,
1120	.reset		= qfq_reset_qdisc,
1121	.destroy	= qfq_destroy_qdisc,
1122	.owner		= THIS_MODULE,
1123};
1124
1125static int __init qfq_init(void)
1126{
1127	return register_qdisc(&qfq_qdisc_ops);
1128}
1129
1130static void __exit qfq_exit(void)
1131{
1132	unregister_qdisc(&qfq_qdisc_ops);
1133}
1134
1135module_init(qfq_init);
1136module_exit(qfq_exit);
1137MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
   4 *
   5 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
   6 * Copyright (c) 2012 Paolo Valente.
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/bitops.h>
  12#include <linux/errno.h>
  13#include <linux/netdevice.h>
  14#include <linux/pkt_sched.h>
  15#include <net/sch_generic.h>
  16#include <net/pkt_sched.h>
  17#include <net/pkt_cls.h>
  18
  19
  20/*  Quick Fair Queueing Plus
  21    ========================
  22
  23    Sources:
  24
  25    [1] Paolo Valente,
  26    "Reducing the Execution Time of Fair-Queueing Schedulers."
  27    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf
  28
  29    Sources for QFQ:
  30
  31    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
  32    Packet Scheduling with Tight Bandwidth Distribution Guarantees."
  33
  34    See also:
  35    http://retis.sssup.it/~fabio/linux/qfq/
  36 */
  37
  38/*
  39
  40  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  41  classes. Each aggregate is timestamped with a virtual start time S
  42  and a virtual finish time F, and scheduled according to its
  43  timestamps. S and F are computed as a function of a system virtual
  44  time function V. The classes within each aggregate are instead
  45  scheduled with DRR.
  46
  47  To speed up operations, QFQ+ divides also aggregates into a limited
  48  number of groups. Which group a class belongs to depends on the
  49  ratio between the maximum packet length for the class and the weight
  50  of the class. Groups have their own S and F. In the end, QFQ+
  51  schedules groups, then aggregates within groups, then classes within
  52  aggregates. See [1] and [2] for a full description.
  53
  54  Virtual time computations.
  55
  56  S, F and V are all computed in fixed point arithmetic with
  57  FRAC_BITS decimal bits.
  58
  59  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  60	one bit per index.
  61  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
  62
  63  The layout of the bits is as below:
  64
  65                   [ MTU_SHIFT ][      FRAC_BITS    ]
  66                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
  67				 ^.__grp->index = 0
  68				 *.__grp->slot_shift
  69
  70  where MIN_SLOT_SHIFT is derived by difference from the others.
  71
  72  The max group index corresponds to Lmax/w_min, where
  73  Lmax=1<<MTU_SHIFT, w_min = 1 .
  74  From this, and knowing how many groups (MAX_INDEX) we want,
  75  we can derive the shift corresponding to each group.
  76
  77  Because we often need to compute
  78	F = S + len/w_i  and V = V + len/wsum
   79  instead of storing w_i we store the value
  80	inv_w = (1<<FRAC_BITS)/w_i
  81  so we can do F = S + len * inv_w * wsum.
  82  We use W_TOT in the formulas so we can easily move between
  83  static and adaptive weight sum.
  84
  85  The per-scheduler-instance data contain all the data structures
  86  for the scheduler: bitmaps and bucket lists.
  87
  88 */
  89
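/* Editor's note, a schematic of the three scheduling levels described
 * above (see qfq_dequeue() and friends for the real logic): groups are
 * picked by timestamp via the ER bitmap, aggregates by slot order
 * within the group, and classes round-robin (DRR) inside the
 * in-service aggregate.
 */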
  90/*
  91 * Maximum number of consecutive slots occupied by backlogged classes
  92 * inside a group.
  93 */
  94#define QFQ_MAX_SLOTS	32
  95
  96/*
  97 * Shifts used for aggregate<->group mapping.  We allow class weights that are
  98 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
  99 * group with the smallest index that can support the L_i / r_i configured
 100 * for the classes in the aggregate.
 101 *
 102 * grp->index is the index of the group; and grp->slot_shift
 103 * is the shift for the corresponding (scaled) sigma_i.
 104 */
 105#define QFQ_MAX_INDEX		24
 106#define QFQ_MAX_WSHIFT		10
 107
 108#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
 109#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)
 110
 111#define FRAC_BITS		30	/* fixed point arithmetic */
 112#define ONE_FP			(1UL << FRAC_BITS)
 113
 114#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
 115#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
 116#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)
 117
 118#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */
 119
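/* Editor's note, the QFQ+ constants worked out: QFQ_MAX_WEIGHT = 2^10 =
 * 1024, QFQ_MAX_WSUM = 64 * 1024 = 65536, QFQ_MAX_LMAX = 2^16 = 65536
 * bytes (large enough for TSO/GSO super-packets), QFQ_MIN_LMAX = 512.
 * With at most 8 classes per aggregate, an aggregate's budgetmax =
 * num_classes * lmax never exceeds 8 * 65536 bytes.
 */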
 120/*
 121 * Possible group states.  These values are used as indexes for the bitmaps
 122 * array of struct qfq_queue.
 123 */
 124enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
 125
 126struct qfq_group;
 127
 128struct qfq_aggregate;
 129
 130struct qfq_class {
 131	struct Qdisc_class_common common;
 132
 133	struct gnet_stats_basic_sync bstats;
 134	struct gnet_stats_queue qstats;
 135	struct net_rate_estimator __rcu *rate_est;
 136	struct Qdisc *qdisc;
 137	struct list_head alist;		/* Link for active-classes list. */
 138	struct qfq_aggregate *agg;	/* Parent aggregate. */
 139	int deficit;			/* DRR deficit counter. */
 140};
 141
 142struct qfq_aggregate {
 143	struct hlist_node next;	/* Link for the slot list. */
 144	u64 S, F;		/* flow timestamps (exact) */
 145
 146	/* group we belong to. In principle we would need the index,
 147	 * which is log_2(lmax/weight), but we never reference it
 148	 * directly, only the group.
 149	 */
 150	struct qfq_group *grp;
 151
 152	/* these are copied from the flowset. */
 153	u32	class_weight; /* Weight of each class in this aggregate. */
 154	/* Max pkt size for the classes in this aggregate, DRR quantum. */
 155	int	lmax;
 156
 157	u32	inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
 158	u32	budgetmax;  /* Max budget for this aggregate. */
 159	u32	initial_budget, budget;     /* Initial and current budget. */
 160
 161	int		  num_classes;	/* Number of classes in this aggr. */
 162	struct list_head  active;	/* DRR queue of active classes. */
 163
 164	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
 165};
 166
 167struct qfq_group {
 168	u64 S, F;			/* group timestamps (approx). */
 169	unsigned int slot_shift;	/* Slot shift. */
 170	unsigned int index;		/* Group index. */
 171	unsigned int front;		/* Index of the front slot. */
 172	unsigned long full_slots;	/* non-empty slots */
 173
 174	/* Array of RR lists of active aggregates. */
 175	struct hlist_head slots[QFQ_MAX_SLOTS];
 176};
 177
 178struct qfq_sched {
 179	struct tcf_proto __rcu *filter_list;
 180	struct tcf_block	*block;
 181	struct Qdisc_class_hash clhash;
 182
 183	u64			oldV, V;	/* Precise virtual times. */
 184	struct qfq_aggregate	*in_serv_agg;   /* Aggregate being served. */
 185	u32			wsum;		/* weight sum */
 186	u32			iwsum;		/* inverse weight sum */
 187
 188	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
 189	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
 190	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */
 191
 192	u32 max_agg_classes;		/* Max number of classes per aggr. */
 193	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
 194};
 195
 196/*
 197 * Possible reasons why the timestamps of an aggregate are updated
  198 * enqueue: the aggregate switches from idle to active and must be scheduled
 199 *	    for service
 200 * requeue: the aggregate finishes its budget, so it stops being served and
 201 *	    must be rescheduled for service
 202 */
 203enum update_reason {enqueue, requeue};
 204
 205static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 206{
 207	struct qfq_sched *q = qdisc_priv(sch);
 208	struct Qdisc_class_common *clc;
 209
 210	clc = qdisc_class_find(&q->clhash, classid);
 211	if (clc == NULL)
 212		return NULL;
 213	return container_of(clc, struct qfq_class, common);
 214}
 215
 216static const struct netlink_range_validation lmax_range = {
 217	.min = QFQ_MIN_LMAX,
 218	.max = QFQ_MAX_LMAX,
 219};
 220
 221static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
 222	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
 223	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
 224};
 225
 226/*
 227 * Calculate a flow index, given its weight and maximum packet length.
 228 * index = log_2(maxlen/weight) but we need to apply the scaling.
 229 * This is used only once at flow creation.
 230 */
 231static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
 232{
 233	u64 slot_size = (u64)maxlen * inv_w;
 234	unsigned long size_map;
 235	int index = 0;
 236
 237	size_map = slot_size >> min_slot_shift;
 238	if (!size_map)
 239		goto out;
 240
 241	index = __fls(size_map) + 1;	/* basically a log_2 */
 242	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
 243
 244	if (index < 0)
 245		index = 0;
 246out:
 247	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
 248		 (unsigned long) ONE_FP/inv_w, maxlen, index);
 249
 250	return index;
 251}
 252
 253static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
 254static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
 255			     enum update_reason);
 256
 257static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
 258			 u32 lmax, u32 weight)
 259{
 260	INIT_LIST_HEAD(&agg->active);
 261	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
 262
 263	agg->lmax = lmax;
 264	agg->class_weight = weight;
 265}
 266
 267static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
 268					  u32 lmax, u32 weight)
 269{
 270	struct qfq_aggregate *agg;
 271
 272	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
 273		if (agg->lmax == lmax && agg->class_weight == weight)
 274			return agg;
 275
 276	return NULL;
 277}
 278
 279
 280/* Update aggregate as a function of the new number of classes. */
 281static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
 282			   int new_num_classes)
 283{
 284	u32 new_agg_weight;
 285
 286	if (new_num_classes == q->max_agg_classes)
 287		hlist_del_init(&agg->nonfull_next);
 288
 289	if (agg->num_classes > new_num_classes &&
 290	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
 291		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
 292
 293	/* The next assignment may let
 294	 * agg->initial_budget > agg->budgetmax
 295	 * hold, we will take it into account in charge_actual_service().
 296	 */
 297	agg->budgetmax = new_num_classes * agg->lmax;
 298	new_agg_weight = agg->class_weight * new_num_classes;
 299	agg->inv_w = ONE_FP/new_agg_weight;
 300
 301	if (agg->grp == NULL) {
 302		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
 303				       q->min_slot_shift);
 304		agg->grp = &q->groups[i];
 305	}
 306
 307	q->wsum +=
 308		(int) agg->class_weight * (new_num_classes - agg->num_classes);
 309	q->iwsum = ONE_FP / q->wsum;
 310
 311	agg->num_classes = new_num_classes;
 312}
 313
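/* A standalone userspace sketch (editor's illustration, not kernel
 * code) of the bookkeeping in qfq_update_agg() above: growing an
 * aggregate of weight-10 classes from 2 to 3 members with lmax = 1514
 * gives budgetmax = 4542, an aggregate weight of 30, and a wsum
 * increase of 10.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t one_fp = 1UL << 30;	/* mirrors ONE_FP */
	uint32_t class_weight = 10, lmax = 1514;
	int old_classes = 2, new_classes = 3;

	uint32_t budgetmax = new_classes * lmax;		 /* 4542 */
	uint32_t inv_w = one_fp / (class_weight * new_classes);	 /* /30 */
	int delta_wsum = class_weight * (new_classes - old_classes);

	printf("budgetmax = %u, inv_w = %u, delta_wsum = %+d\n",
	       budgetmax, inv_w, delta_wsum);
	return 0;
}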
 314/* Add class to aggregate. */
 315static void qfq_add_to_agg(struct qfq_sched *q,
 316			   struct qfq_aggregate *agg,
 317			   struct qfq_class *cl)
 318{
 319	cl->agg = agg;
 320
 321	qfq_update_agg(q, agg, agg->num_classes+1);
 322	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
 323		list_add_tail(&cl->alist, &agg->active);
 324		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
 325		    cl && q->in_serv_agg != agg) /* agg was inactive */
 326			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
 327	}
 328}
 329
 330static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);
 331
 332static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 333{
 334	hlist_del_init(&agg->nonfull_next);
 335	q->wsum -= agg->class_weight;
 336	if (q->wsum != 0)
 337		q->iwsum = ONE_FP / q->wsum;
 338
 339	if (q->in_serv_agg == agg)
 340		q->in_serv_agg = qfq_choose_next_agg(q);
 341	kfree(agg);
 342}
 343
 344/* Deschedule class from within its parent aggregate. */
 345static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
 346{
 347	struct qfq_aggregate *agg = cl->agg;
 348
 349
 350	list_del(&cl->alist); /* remove from RR queue of the aggregate */
 351	if (list_empty(&agg->active)) /* agg is now inactive */
 352		qfq_deactivate_agg(q, agg);
 353}
 354
 355/* Remove class from its parent aggregate. */
 356static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
 357{
 358	struct qfq_aggregate *agg = cl->agg;
 359
 360	cl->agg = NULL;
 361	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
 362		qfq_destroy_agg(q, agg);
 363		return;
 364	}
 365	qfq_update_agg(q, agg, agg->num_classes-1);
 366}
 367
 368/* Deschedule class and remove it from its parent aggregate. */
 369static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
 370{
 371	if (cl->qdisc->q.qlen > 0) /* class is active */
 372		qfq_deactivate_class(q, cl);
 373
 374	qfq_rm_from_agg(q, cl);
 375}
 376
 377/* Move class to a new aggregate, matching the new class weight and/or lmax */
 378static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
 379			   u32 lmax)
 380{
 381	struct qfq_sched *q = qdisc_priv(sch);
 382	struct qfq_aggregate *new_agg;
 383
 384	/* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
 385	if (lmax > QFQ_MAX_LMAX)
 386		return -EINVAL;
 387
 388	new_agg = qfq_find_agg(q, lmax, weight);
 389	if (new_agg == NULL) { /* create new aggregate */
 390		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
 391		if (new_agg == NULL)
 392			return -ENOBUFS;
 393		qfq_init_agg(q, new_agg, lmax, weight);
 394	}
 395	qfq_deact_rm_from_agg(q, cl);
 396	qfq_add_to_agg(q, new_agg, cl);
 397
 398	return 0;
 399}
 400
 401static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 402			    struct nlattr **tca, unsigned long *arg,
 403			    struct netlink_ext_ack *extack)
 404{
 405	struct qfq_sched *q = qdisc_priv(sch);
 406	struct qfq_class *cl = (struct qfq_class *)*arg;
 407	bool existing = false;
 408	struct nlattr *tb[TCA_QFQ_MAX + 1];
 409	struct qfq_aggregate *new_agg = NULL;
 410	u32 weight, lmax, inv_w;
 411	int err;
 412	int delta_w;
 413
 414	if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
 415		NL_SET_ERR_MSG_MOD(extack, "missing options");
 416		return -EINVAL;
 417	}
 418
 419	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
 420					  qfq_policy, extack);
 421	if (err < 0)
 422		return err;
 423
 424	weight = nla_get_u32_default(tb[TCA_QFQ_WEIGHT], 1);
 425
 426	if (tb[TCA_QFQ_LMAX]) {
 427		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
 428	} else {
 429		/* MTU size is user controlled */
 430		lmax = psched_mtu(qdisc_dev(sch));
 431		if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
 432			NL_SET_ERR_MSG_MOD(extack,
 433					   "MTU size out of bounds for qfq");
 434			return -EINVAL;
 435		}
 436	}
 437
 438	inv_w = ONE_FP / weight;
 439	weight = ONE_FP / inv_w;
 440
 441	if (cl != NULL &&
 442	    lmax == cl->agg->lmax &&
 443	    weight == cl->agg->class_weight)
 444		return 0; /* nothing to change */
 445
 446	delta_w = weight - (cl ? cl->agg->class_weight : 0);
 447
 448	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
 449		NL_SET_ERR_MSG_FMT_MOD(extack,
 450				       "total weight out of range (%d + %u)\n",
 451				       delta_w, q->wsum);
 452		return -EINVAL;
 453	}
 454
 455	if (cl != NULL) { /* modify existing class */
 456		if (tca[TCA_RATE]) {
 457			err = gen_replace_estimator(&cl->bstats, NULL,
 458						    &cl->rate_est,
 459						    NULL,
 460						    true,
 461						    tca[TCA_RATE]);
 462			if (err)
 463				return err;
 464		}
 465		existing = true;
 466		goto set_change_agg;
 467	}
 468
 469	/* create and init new class */
 470	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
 471	if (cl == NULL)
 472		return -ENOBUFS;
 473
 474	gnet_stats_basic_sync_init(&cl->bstats);
 475	cl->common.classid = classid;
 476	cl->deficit = lmax;
 477
 478	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 479				      classid, NULL);
 480	if (cl->qdisc == NULL)
 481		cl->qdisc = &noop_qdisc;
 482
 483	if (tca[TCA_RATE]) {
 484		err = gen_new_estimator(&cl->bstats, NULL,
 485					&cl->rate_est,
 486					NULL,
 487					true,
 488					tca[TCA_RATE]);
 489		if (err)
 490			goto destroy_class;
 491	}
 492
 493	if (cl->qdisc != &noop_qdisc)
 494		qdisc_hash_add(cl->qdisc, true);
 495
 496set_change_agg:
 497	sch_tree_lock(sch);
 498	new_agg = qfq_find_agg(q, lmax, weight);
 499	if (new_agg == NULL) { /* create new aggregate */
 500		sch_tree_unlock(sch);
 501		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
 502		if (new_agg == NULL) {
 503			err = -ENOBUFS;
 504			gen_kill_estimator(&cl->rate_est);
 505			goto destroy_class;
 506		}
 507		sch_tree_lock(sch);
 508		qfq_init_agg(q, new_agg, lmax, weight);
 509	}
 510	if (existing)
 511		qfq_deact_rm_from_agg(q, cl);
 512	else
 513		qdisc_class_hash_insert(&q->clhash, &cl->common);
 514	qfq_add_to_agg(q, new_agg, cl);
 515	sch_tree_unlock(sch);
 516	qdisc_class_hash_grow(sch, &q->clhash);
 517
 518	*arg = (unsigned long)cl;
 519	return 0;
 520
 521destroy_class:
 522	qdisc_put(cl->qdisc);
 523	kfree(cl);
 524	return err;
 525}
 526
 527static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
 528{
 529	struct qfq_sched *q = qdisc_priv(sch);
 530
 531	qfq_rm_from_agg(q, cl);
 532	gen_kill_estimator(&cl->rate_est);
 533	qdisc_put(cl->qdisc);
 534	kfree(cl);
 535}
 536
 537static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
 538			    struct netlink_ext_ack *extack)
 539{
 540	struct qfq_sched *q = qdisc_priv(sch);
 541	struct qfq_class *cl = (struct qfq_class *)arg;
 542
 543	if (qdisc_class_in_use(&cl->common)) {
 544		NL_SET_ERR_MSG_MOD(extack, "QFQ class in use");
 545		return -EBUSY;
 546	}
 547
 548	sch_tree_lock(sch);
 549
 550	qdisc_purge_queue(cl->qdisc);
 551	qdisc_class_hash_remove(&q->clhash, &cl->common);
 552
 553	sch_tree_unlock(sch);
 554
 555	qfq_destroy_class(sch, cl);
 556	return 0;
 557}
 558
 559static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
 560{
 561	return (unsigned long)qfq_find_class(sch, classid);
 562}
 563
 564static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
 565				       struct netlink_ext_ack *extack)
 566{
 567	struct qfq_sched *q = qdisc_priv(sch);
 568
 569	if (cl)
 570		return NULL;
 571
 572	return q->block;
 573}
 574
 575static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
 576				  u32 classid)
 577{
 578	struct qfq_class *cl = qfq_find_class(sch, classid);
 579
 580	if (cl)
 581		qdisc_class_get(&cl->common);
 582
 583	return (unsigned long)cl;
 584}
 585
 586static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 587{
 588	struct qfq_class *cl = (struct qfq_class *)arg;
 589
 590	qdisc_class_put(&cl->common);
 591}
 592
 593static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
 594			   struct Qdisc *new, struct Qdisc **old,
 595			   struct netlink_ext_ack *extack)
 596{
 597	struct qfq_class *cl = (struct qfq_class *)arg;
 598
 599	if (new == NULL) {
 600		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 601					cl->common.classid, NULL);
 602		if (new == NULL)
 603			new = &noop_qdisc;
 604	}
 605
 606	*old = qdisc_replace(sch, new, &cl->qdisc);
 607	return 0;
 608}
 609
 610static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
 611{
 612	struct qfq_class *cl = (struct qfq_class *)arg;
 613
 614	return cl->qdisc;
 615}
 616
 617static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
 618			  struct sk_buff *skb, struct tcmsg *tcm)
 619{
 620	struct qfq_class *cl = (struct qfq_class *)arg;
 621	struct nlattr *nest;
 622
 623	tcm->tcm_parent	= TC_H_ROOT;
 624	tcm->tcm_handle	= cl->common.classid;
 625	tcm->tcm_info	= cl->qdisc->handle;
 626
 627	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 628	if (nest == NULL)
 629		goto nla_put_failure;
 630	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
 631	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
 632		goto nla_put_failure;
 633	return nla_nest_end(skb, nest);
 634
 635nla_put_failure:
 636	nla_nest_cancel(skb, nest);
 637	return -EMSGSIZE;
 638}
 639
 640static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 641				struct gnet_dump *d)
 642{
 643	struct qfq_class *cl = (struct qfq_class *)arg;
 644	struct tc_qfq_stats xstats;
 645
 646	memset(&xstats, 0, sizeof(xstats));
 647
 648	xstats.weight = cl->agg->class_weight;
 649	xstats.lmax = cl->agg->lmax;
 650
 651	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
 652	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
 653	    qdisc_qstats_copy(d, cl->qdisc) < 0)
 654		return -1;
 655
 656	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
 657}
 658
 659static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 660{
 661	struct qfq_sched *q = qdisc_priv(sch);
 662	struct qfq_class *cl;
 663	unsigned int i;
 664
 665	if (arg->stop)
 666		return;
 667
 668	for (i = 0; i < q->clhash.hashsize; i++) {
 669		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 670			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
 671				return;
 672		}
 673	}
 674}
 675
 676static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 677				      int *qerr)
 678{
 679	struct qfq_sched *q = qdisc_priv(sch);
 680	struct qfq_class *cl;
 681	struct tcf_result res;
 682	struct tcf_proto *fl;
 683	int result;
 684
 685	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
 686		pr_debug("qfq_classify: found %d\n", skb->priority);
 687		cl = qfq_find_class(sch, skb->priority);
 688		if (cl != NULL)
 689			return cl;
 690	}
 691
 692	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 693	fl = rcu_dereference_bh(q->filter_list);
 694	result = tcf_classify(skb, NULL, fl, &res, false);
 695	if (result >= 0) {
 696#ifdef CONFIG_NET_CLS_ACT
 697		switch (result) {
 698		case TC_ACT_QUEUED:
 699		case TC_ACT_STOLEN:
 700		case TC_ACT_TRAP:
 701			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 702			fallthrough;
 703		case TC_ACT_SHOT:
 704			return NULL;
 705		}
 706#endif
 707		cl = (struct qfq_class *)res.class;
 708		if (cl == NULL)
 709			cl = qfq_find_class(sch, res.classid);
 710		return cl;
 711	}
 712
 713	return NULL;
 714}
 715
 716/* Generic comparison function, handling wraparound. */
 717static inline int qfq_gt(u64 a, u64 b)
 718{
 719	return (s64)(a - b) > 0;
 720}
 721
 722/* Round a precise timestamp to its slotted value. */
 723static inline u64 qfq_round_down(u64 ts, unsigned int shift)
 724{
 725	return ts & ~((1ULL << shift) - 1);
 726}
 727
 728/* return the pointer to the group with lowest index in the bitmap */
 729static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
 730					unsigned long bitmap)
 731{
 732	int index = __ffs(bitmap);
 733	return &q->groups[index];
 734}
 735/* Calculate a mask to mimic what would be ffs_from(). */
 736static inline unsigned long mask_from(unsigned long bitmap, int from)
 737{
 738	return bitmap & ~((1UL << from) - 1);
 739}
 740
 741/*
 742 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 743 * First compute eligibility comparing grp->S, q->V,
 744 * then check if someone is blocking us and possibly add EB
 745 */
 746static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
 747{
 748	/* if S > V we are not eligible */
 749	unsigned int state = qfq_gt(grp->S, q->V);
 750	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
 751	struct qfq_group *next;
 752
 753	if (mask) {
 754		next = qfq_ffs(q, mask);
 755		if (qfq_gt(grp->F, next->F))
 756			state |= EB;
 757	}
 758
 759	return state;
 760}
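/*
 * Illustrative table (editor's addition): EB == 2 acts as the
 * "blocked" flag OR-ed into the eligibility bit, so:
 *
 *	eligible,   unblocked -> 0 (ER)
 *	ineligible, unblocked -> 1 (IR)
 *	eligible,   blocked   -> 2 (EB)
 *	ineligible, blocked   -> 3 (IB)
 */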
 761
 762
 763/*
 764 * In principle
 765 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 766 *	q->bitmaps[src] &= ~mask;
 767 * but we should make sure that src != dst
 768 */
 769static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
 770				   int src, int dst)
 771{
 772	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 773	q->bitmaps[src] &= ~mask;
 774}
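/*
 * Illustrative example (editor's addition):
 * qfq_move_groups(q, 0x3, IR, ER) moves groups 0 and 1 (those set in
 * the mask) from the IR bitmap to the ER bitmap, leaving every other
 * group where it was.
 */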
 775
 776static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
 777{
 778	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
 779	struct qfq_group *next;
 780
 781	if (mask) {
 782		next = qfq_ffs(q, mask);
 783		if (!qfq_gt(next->F, old_F))
 784			return;
 785	}
 786
 787	mask = (1UL << index) - 1;
 788	qfq_move_groups(q, mask, EB, ER);
 789	qfq_move_groups(q, mask, IB, IR);
 790}
 791
 792/*
 793 * perhaps
 794 *
 795	old_V ^= q->V;
 796	old_V >>= q->min_slot_shift;
 797	if (old_V) {
 798		...
 799	}
 800 *
 801 */
 802static void qfq_make_eligible(struct qfq_sched *q)
 803{
 804	unsigned long vslot = q->V >> q->min_slot_shift;
 805	unsigned long old_vslot = q->oldV >> q->min_slot_shift;
 806
 807	if (vslot != old_vslot) {
 808		unsigned long mask;
 809		int last_flip_pos = fls(vslot ^ old_vslot);
 810
 811		if (last_flip_pos > 31) /* higher than the number of groups */
 812			mask = ~0UL;    /* make all groups eligible */
 813		else
 814			mask = (1UL << last_flip_pos) - 1;
 815
 816		qfq_move_groups(q, mask, IR, ER);
 817		qfq_move_groups(q, mask, IB, EB);
 818	}
 819}
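/*
 * Worked example (editor's addition): if old_vslot == 0x4 and
 * vslot == 0x7, then vslot ^ old_vslot == 0x3 and fls() == 2, so
 * mask == 0x3: groups 0 and 1 move from IR to ER (and IB to EB),
 * because V has just crossed a multiple of their slot sizes.
 */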
 820
 821/*
 822 * The index of the slot in which the input aggregate agg is to be
 823 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 824 * and not a '-1' because the start time of the group may be moved
 825 * backward by one slot after the aggregate has been inserted, and
 826 * this would cause non-empty slots to be right-shifted by one
 827 * position.
 828 *
 829 * QFQ+ fully satisfies this bound to the slot index if the parameters
 830 * of the classes are not changed dynamically, and if QFQ+ never
 831 * happens to postpone the service of agg unjustly, i.e., it never
 832 * happens that the aggregate becomes backlogged and eligible, or just
 833 * eligible, while an aggregate with a higher approximated finish time
 834 * is being served. In particular, in this case QFQ+ guarantees that
 835 * the timestamps of agg are low enough that the slot index is never
 836 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 837 * guarantee if it happens to unjustly postpone the service of agg, or
 838 * if the parameters of some class are changed.
 839 *
 840 * As for the first event, i.e., an out-of-order service, the
 841 * upper bound to the slot index guaranteed by QFQ+ grows to
 842 * 2 +
 843 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 844 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 845 *
 846 * The following function deals with this problem by backward-shifting
 847 * the timestamps of agg, if needed, so as to guarantee that the slot
 848 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 849 * cause the service of other aggregates to be postponed, yet the
 850 * worst-case guarantees of these aggregates are not violated.  In
 851 * fact, in case of no out-of-order service, the timestamps of agg
 852 * would have been even lower than they are after the backward shift,
 853 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 854 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 855 * service is postponed because of the backward-shift would in any
 856 * case have waited for the service of agg before being served.
 857 *
 858 * The other event that may cause the slot index to be higher than 2
 859 * for agg is a recent change of the parameters of some class. If the
 860 * weight of a class is increased or the lmax (max_pkt_size) of the
 861 * class is decreased, then a new aggregate with smaller slot size
 862 * than the original parent aggregate of the class may happen to be
 863 * activated. The activation of this aggregate should be properly
 864 * delayed to when the service of the class has finished in the ideal
 865 * system tracked by QFQ+. If the activation of the aggregate is not
 866 * delayed to this reference time instant, then this aggregate may be
 867 * unjustly served before other aggregates waiting for service. This
 868 * may cause the above bound to the slot index to be violated for some
 869 * of these unlucky aggregates.
 870 *
 871 * Instead of delaying the activation of the new aggregate, which is
 872 * quite complex, the above-discussed capping of the slot index is
 873 * used to handle also the consequences of a change of the parameters
 874 * of a class.
 875 */
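/*
 * Editor's note on the bound above: plugging in the constants,
 * 2 + 8 * 128 * 1 == 1026, far beyond QFQ_MAX_SLOTS - 2 == 30,
 * which is why the capping below is needed at all.
 */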
 876static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
 877			    u64 roundedS)
 878{
 879	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
 880	unsigned int i; /* slot index in the bucket list */
 881
 882	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
 883		u64 deltaS = roundedS - grp->S -
 884			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
 885		agg->S -= deltaS;
 886		agg->F -= deltaS;
 887		slot = QFQ_MAX_SLOTS - 2;
 888	}
 889
 890	i = (grp->front + slot) % QFQ_MAX_SLOTS;
 891
 892	hlist_add_head(&agg->next, &grp->slots[i]);
 893	__set_bit(slot, &grp->full_slots);
 894}
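/*
 * Illustrative example (editor's addition): with grp->S == 64,
 * grp->slot_shift == 5 (slot size 32) and roundedS == 160, the
 * aggregate lands in slot (160 - 64) >> 5 == 3, i.e. bucket
 * (grp->front + 3) % QFQ_MAX_SLOTS.
 */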
 895
 896/* Maybe introduce hlist_first_entry?? */
 897static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
 898{
 899	return hlist_entry(grp->slots[grp->front].first,
 900			   struct qfq_aggregate, next);
 901}
 902
 903/*
 904 * remove the entry from the slot
 905 */
 906static void qfq_front_slot_remove(struct qfq_group *grp)
 907{
 908	struct qfq_aggregate *agg = qfq_slot_head(grp);
 909
 910	BUG_ON(!agg);
 911	hlist_del(&agg->next);
 912	if (hlist_empty(&grp->slots[grp->front]))
 913		__clear_bit(0, &grp->full_slots);
 914}
 915
 916/*
 917 * Returns the first aggregate in the first non-empty bucket of the
 918 * group. As a side effect, adjusts the bucket list so the first
 919 * non-empty bucket is at position 0 in full_slots.
 920 */
 921static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
 922{
 923	unsigned int i;
 924
 925	pr_debug("qfq slot_scan: grp %u full %#lx\n",
 926		 grp->index, grp->full_slots);
 927
 928	if (grp->full_slots == 0)
 929		return NULL;
 930
 931	i = __ffs(grp->full_slots);  /* zero based */
 932	if (i > 0) {
 933		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
 934		grp->full_slots >>= i;
 935	}
 936
 937	return qfq_slot_head(grp);
 938}
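/*
 * Illustrative example (editor's addition): if full_slots == 0xc,
 * __ffs() == 2, so front advances by two buckets and full_slots
 * becomes 0x3: the first non-empty bucket now sits at position 0,
 * restoring the invariant stated above.
 */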
 939
 940/*
 941 * adjust the bucket list. When the start time of a group decreases,
 942 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 943 * move the objects. The mask of occupied slots must be shifted
 944 * because we use ffs() to find the first non-empty slot.
 945 * This covers decreases in the group's start time, but what about
 946 * increases of the start time?
 947 * Here too we should make sure that i is less than 32
 948 */
 949static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
 950{
 951	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
 952
 953	grp->full_slots <<= i;
 954	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
 955}
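/*
 * Illustrative example (editor's addition): with grp->S == 96,
 * grp->slot_shift == 5 and roundedS == 32, i == (96 - 32) >> 5 == 2:
 * full_slots shifts left by two and front moves back by two, opening
 * room for the earlier slots without touching the queued aggregates.
 */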
 956
 957static void qfq_update_eligible(struct qfq_sched *q)
 958{
 959	struct qfq_group *grp;
 960	unsigned long ineligible;
 961
 962	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
 963	if (ineligible) {
 964		if (!q->bitmaps[ER]) {
 965			grp = qfq_ffs(q, ineligible);
 966			if (qfq_gt(grp->S, q->V))
 967				q->V = grp->S;
 968		}
 969		qfq_make_eligible(q);
 970	}
 971}
 972
 973/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
 974static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
 975				   struct qfq_class *cl, unsigned int len)
 976{
 977	struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
 978
 979	if (!skb)
 980		return NULL;
 981
 982	cl->deficit -= (int) len;
 983
 984	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
 985		list_del(&cl->alist);
 986	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
 987		cl->deficit += agg->lmax;
 988		list_move_tail(&cl->alist, &agg->active);
 989	}
 990
 991	return skb;
 992}
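/*
 * DRR illustration (editor's addition): with agg->lmax == 1500, a
 * class whose deficit is 2000 dequeues a 1500-byte packet, leaving
 * deficit 500. If its next packet is 1000 bytes, 500 < 1000, so the
 * deficit is recharged to 500 + 1500 == 2000 and the class moves to
 * the tail of the active list, giving the aggregate's other classes
 * a turn.
 */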
 993
 994static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
 995					   struct qfq_class **cl,
 996					   unsigned int *len)
 997{
 998	struct sk_buff *skb;
 999
1000	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
1001	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
1002	if (skb == NULL)
1003		qdisc_warn_nonwc("qfq_dequeue", (*cl)->qdisc);
1004	else
1005		*len = qdisc_pkt_len(skb);
1006
1007	return skb;
1008}
1009
1010/* Update F according to the actual service received by the aggregate. */
1011static inline void charge_actual_service(struct qfq_aggregate *agg)
1012{
1013	/* Compute the service received by the aggregate, taking into
1014	 * account that, after decreasing the number of classes in
1015	 * agg, it may happen that
1016	 * agg->initial_budget - agg->budget > agg->budgetmax
1017	 */
1018	u32 service_received = min(agg->budgetmax,
1019				   agg->initial_budget - agg->budget);
1020
1021	agg->F = agg->S + (u64)service_received * agg->inv_w;
1022}
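/*
 * Illustrative example (editor's addition): an aggregate with
 * weight 4 (inv_w == ONE_FP/4) that actually consumed 3000 bytes has
 * its finish time set to F = S + 3000 * inv_w, i.e. 750 virtual-time
 * units after S.
 */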
1023
1024/* Assign a reasonable start time for a new aggregate in group i.
1025 * Admissible values for \hat{F} are multiples of \sigma_i
1026 * no greater than V+\sigma_i. Larger values mean that
1027 * we had a wraparound so we consider the timestamp to be stale.
1028 *
1029 * If F is not stale and F >= V then we set S = F.
1030 * Otherwise we should assign S = V, but this may violate
1031 * the ordering in EB (see [2]). So, if we have groups in ER,
1032 * set S to the F_j of the first group j which would be blocking us.
1033 * We are guaranteed not to move S backward because
1034 * otherwise our group i would still be blocked.
1035 */
1036static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
1037{
1038	unsigned long mask;
1039	u64 limit, roundedF;
1040	int slot_shift = agg->grp->slot_shift;
1041
1042	roundedF = qfq_round_down(agg->F, slot_shift);
1043	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
1044
1045	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
1046		/* timestamp was stale */
1047		mask = mask_from(q->bitmaps[ER], agg->grp->index);
1048		if (mask) {
1049			struct qfq_group *next = qfq_ffs(q, mask);
1050			if (qfq_gt(roundedF, next->F)) {
1051				if (qfq_gt(limit, next->F))
1052					agg->S = next->F;
1053				else /* preserve timestamp correctness */
1054					agg->S = limit;
1055				return;
1056			}
1057		}
1058		agg->S = q->V;
1059	} else  /* timestamp is not stale */
1060		agg->S = agg->F;
1061}
1062
1063/* Update the timestamps of agg before scheduling/rescheduling it for
1064 * service.  In particular, assign to agg->F its maximum possible
1065 * value, i.e., the virtual finish time with which the aggregate
1066 * should be labeled if it used all its budget once in service.
1067 */
1068static inline void
1069qfq_update_agg_ts(struct qfq_sched *q,
1070		    struct qfq_aggregate *agg, enum update_reason reason)
1071{
1072	if (reason != requeue)
1073		qfq_update_start(q, agg);
1074	else /* just charge agg for the service received */
1075		agg->S = agg->F;
1076
1077	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
1078}
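/*
 * Illustrative example (editor's addition): an aggregate with
 * budgetmax == 3000 and weight 2 (inv_w == ONE_FP/2) is stamped with
 * F = S + 1500 virtual-time units: the deadline it would meet if it
 * spent its whole budget in one service round.
 */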
1079
1080static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
1081
1082static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
1083{
1084	struct qfq_sched *q = qdisc_priv(sch);
1085	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
1086	struct qfq_class *cl;
1087	struct sk_buff *skb = NULL;
1088	/* next-packet len, 0 means no more active classes in in-service agg */
1089	unsigned int len = 0;
1090
1091	if (in_serv_agg == NULL)
1092		return NULL;
1093
1094	if (!list_empty(&in_serv_agg->active))
1095		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
1096
1097	/*
1098	 * If there are no active classes in the in-service aggregate,
1099	 * or if the aggregate has not enough budget to serve its next
1100	 * class, then choose the next aggregate to serve.
1101	 */
1102	if (len == 0 || in_serv_agg->budget < len) {
1103		charge_actual_service(in_serv_agg);
1104
1105		/* recharge the budget of the aggregate */
1106		in_serv_agg->initial_budget = in_serv_agg->budget =
1107			in_serv_agg->budgetmax;
1108
1109		if (!list_empty(&in_serv_agg->active)) {
1110			/*
1111			 * Still active: reschedule for
1112			 * service. Possible optimization: if no other
1113			 * aggregate is active, then there is no point
1114			 * in rescheduling this aggregate, and we can
1115			 * just keep it as the in-service one. This
1116			 * should be however a corner case, and to
1117			 * handle it, we would need to maintain an
1118			 * extra num_active_aggs field.
1119			 */
1120			qfq_update_agg_ts(q, in_serv_agg, requeue);
1121			qfq_schedule_agg(q, in_serv_agg);
1122		} else if (sch->q.qlen == 0) { /* no aggregate to serve */
1123			q->in_serv_agg = NULL;
1124			return NULL;
1125		}
1126
1127		/*
1128		 * If we get here, there are other aggregates queued:
1129		 * choose the new aggregate to serve.
1130		 */
1131		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
1132		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
1133	}
1134	if (!skb)
1135		return NULL;
1136
1137	sch->q.qlen--;
1138
1139	skb = agg_dequeue(in_serv_agg, cl, len);
1140
1141	if (!skb) {
1142		sch->q.qlen++;
1143		return NULL;
1144	}
1145
1146	qdisc_qstats_backlog_dec(sch, skb);
1147	qdisc_bstats_update(sch, skb);
1148
1149	/* If lmax is lowered, through qfq_change_class, for a class
1150	 * owning pending packets with larger size than the new value
1151	 * of lmax, then the following condition may hold.
1152	 */
1153	if (unlikely(in_serv_agg->budget < len))
1154		in_serv_agg->budget = 0;
1155	else
1156		in_serv_agg->budget -= len;
1157
1158	q->V += (u64)len * q->iwsum;
1159	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
1160		 len, (unsigned long long) in_serv_agg->F,
1161		 (unsigned long long) q->V);
1162
1163	return skb;
1164}
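/*
 * Illustrative example (editor's addition, assuming q->iwsum holds
 * ONE_FP/wsum): dequeueing a 1500-byte packet above advances the
 * global virtual time V by 1500/wsum time units, independently of
 * which class sent it.
 */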
1165
1166static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
1167{
1168	struct qfq_group *grp;
1169	struct qfq_aggregate *agg, *new_front_agg;
1170	u64 old_F;
1171
1172	qfq_update_eligible(q);
1173	q->oldV = q->V;
1174
1175	if (!q->bitmaps[ER])
1176		return NULL;
1177
1178	grp = qfq_ffs(q, q->bitmaps[ER]);
1179	old_F = grp->F;
1180
1181	agg = qfq_slot_head(grp);
1182
1183	/* agg starts to be served, remove it from schedule */
1184	qfq_front_slot_remove(grp);
1185
1186	new_front_agg = qfq_slot_scan(grp);
1187
1188	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
1189		__clear_bit(grp->index, &q->bitmaps[ER]);
1190	else {
1191		u64 roundedS = qfq_round_down(new_front_agg->S,
1192					      grp->slot_shift);
1193		unsigned int s;
1194
1195		if (grp->S == roundedS)
1196			return agg;
1197		grp->S = roundedS;
1198		grp->F = roundedS + (2ULL << grp->slot_shift);
1199		__clear_bit(grp->index, &q->bitmaps[ER]);
1200		s = qfq_calc_state(q, grp);
1201		__set_bit(grp->index, &q->bitmaps[s]);
1202	}
1203
1204	qfq_unblock_groups(q, grp->index, old_F);
1205
1206	return agg;
1207}
1208
1209static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1210		       struct sk_buff **to_free)
1211{
1212	unsigned int len = qdisc_pkt_len(skb), gso_segs;
1213	struct qfq_sched *q = qdisc_priv(sch);
1214	struct qfq_class *cl;
1215	struct qfq_aggregate *agg;
1216	int err = 0;
1217	bool first;
1218
1219	cl = qfq_classify(skb, sch, &err);
1220	if (cl == NULL) {
1221		if (err & __NET_XMIT_BYPASS)
1222			qdisc_qstats_drop(sch);
1223		__qdisc_drop(skb, to_free);
1224		return err;
1225	}
1226	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
1227
1228	if (unlikely(cl->agg->lmax < len)) {
1229		pr_debug("qfq: increasing maxpkt from %u to %u for class %u\n",
1230			 cl->agg->lmax, len, cl->common.classid);
1231		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
1232		if (err) {
1233			cl->qstats.drops++;
1234			return qdisc_drop(skb, sch, to_free);
1235		}
1236	}
1237
1238	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
1239	first = !cl->qdisc->q.qlen;
1240	err = qdisc_enqueue(skb, cl->qdisc, to_free);
1241	if (unlikely(err != NET_XMIT_SUCCESS)) {
1242		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
1243		if (net_xmit_drop_count(err)) {
1244			cl->qstats.drops++;
1245			qdisc_qstats_drop(sch);
1246		}
1247		return err;
1248	}
1249
1250	_bstats_update(&cl->bstats, len, gso_segs);
1251	sch->qstats.backlog += len;
1252	++sch->q.qlen;
1253
1254	agg = cl->agg;
1255	/* if the queue was not empty, then done here */
1256	if (!first) {
1257		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
1258		    list_first_entry(&agg->active, struct qfq_class, alist)
1259		    == cl && cl->deficit < len)
1260			list_move_tail(&cl->alist, &agg->active);
1261
1262		return err;
1263	}
1264
1265	/* schedule class for service within the aggregate */
1266	cl->deficit = agg->lmax;
1267	list_add_tail(&cl->alist, &agg->active);
1268
1269	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
1270	    q->in_serv_agg == agg)
1271		return err; /* non-empty or in service, nothing else to do */
1272
1273	qfq_activate_agg(q, agg, enqueue);
1274
1275	return err;
1276}
1277
1278/*
1279 * Schedule aggregate according to its timestamps.
1280 */
1281static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1282{
1283	struct qfq_group *grp = agg->grp;
1284	u64 roundedS;
1285	int s;
1286
1287	roundedS = qfq_round_down(agg->S, grp->slot_shift);
1288
1289	/*
1290	 * Insert agg in the correct bucket.
1291	 * If agg->S >= grp->S we don't need to adjust the
1292	 * bucket list and simply go to the insertion phase.
1293	 * Otherwise grp->S is decreasing, we must make room
1294	 * in the bucket list, and also recompute the group state.
1295	 * Finally, if there were no flows in this group and nobody
1296	 * was in ER make sure to adjust V.
1297	 */
1298	if (grp->full_slots) {
1299		if (!qfq_gt(grp->S, agg->S))
1300			goto skip_update;
1301
1302		/* create a slot for this agg->S */
1303		qfq_slot_rotate(grp, roundedS);
1304		/* group was surely ineligible, remove */
1305		__clear_bit(grp->index, &q->bitmaps[IR]);
1306		__clear_bit(grp->index, &q->bitmaps[IB]);
1307	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
1308		   q->in_serv_agg == NULL)
1309		q->V = roundedS;
1310
1311	grp->S = roundedS;
1312	grp->F = roundedS + (2ULL << grp->slot_shift);
1313	s = qfq_calc_state(q, grp);
1314	__set_bit(grp->index, &q->bitmaps[s]);
1315
1316	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
1317		 s, q->bitmaps[s],
1318		 (unsigned long long) agg->S,
1319		 (unsigned long long) agg->F,
1320		 (unsigned long long) q->V);
1321
1322skip_update:
1323	qfq_slot_insert(grp, agg, roundedS);
1324}
1325
1326
1327/* Update agg ts and schedule agg for service */
1328static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
1329			     enum update_reason reason)
1330{
1331	agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
1332
1333	qfq_update_agg_ts(q, agg, reason);
1334	if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
1335		q->in_serv_agg = agg; /* start serving this aggregate */
1336		 /* update V: to be in service, agg must be eligible */
1337		q->oldV = q->V = agg->S;
1338	} else if (agg != q->in_serv_agg)
1339		qfq_schedule_agg(q, agg);
1340}
1341
1342static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
1343			    struct qfq_aggregate *agg)
1344{
1345	unsigned int i, offset;
1346	u64 roundedS;
1347
1348	roundedS = qfq_round_down(agg->S, grp->slot_shift);
1349	offset = (roundedS - grp->S) >> grp->slot_shift;
1350
1351	i = (grp->front + offset) % QFQ_MAX_SLOTS;
1352
1353	hlist_del(&agg->next);
1354	if (hlist_empty(&grp->slots[i]))
1355		__clear_bit(offset, &grp->full_slots);
1356}
1357
1358/*
1359 * Called to forcibly deschedule an aggregate.  If the aggregate is
1360 * not in the front bucket, or if the latter has other aggregates in
1361 * the front bucket, we can simply remove the aggregate with no other
1362 * side effects.
1363 * Otherwise we must propagate the event up.
1364 */
1365static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1366{
1367	struct qfq_group *grp = agg->grp;
1368	unsigned long mask;
1369	u64 roundedS;
1370	int s;
1371
1372	if (agg == q->in_serv_agg) {
1373		charge_actual_service(agg);
1374		q->in_serv_agg = qfq_choose_next_agg(q);
1375		return;
1376	}
1377
1378	agg->F = agg->S;
1379	qfq_slot_remove(q, grp, agg);
1380
1381	if (!grp->full_slots) {
1382		__clear_bit(grp->index, &q->bitmaps[IR]);
1383		__clear_bit(grp->index, &q->bitmaps[EB]);
1384		__clear_bit(grp->index, &q->bitmaps[IB]);
1385
1386		if (test_bit(grp->index, &q->bitmaps[ER]) &&
1387		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
1388			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
1389			if (mask)
1390				mask = ~((1UL << __fls(mask)) - 1);
1391			else
1392				mask = ~0UL;
1393			qfq_move_groups(q, mask, EB, ER);
1394			qfq_move_groups(q, mask, IB, IR);
1395		}
1396		__clear_bit(grp->index, &q->bitmaps[ER]);
1397	} else if (hlist_empty(&grp->slots[grp->front])) {
1398		agg = qfq_slot_scan(grp);
1399		roundedS = qfq_round_down(agg->S, grp->slot_shift);
1400		if (grp->S != roundedS) {
1401			__clear_bit(grp->index, &q->bitmaps[ER]);
1402			__clear_bit(grp->index, &q->bitmaps[IR]);
1403			__clear_bit(grp->index, &q->bitmaps[EB]);
1404			__clear_bit(grp->index, &q->bitmaps[IB]);
1405			grp->S = roundedS;
1406			grp->F = roundedS + (2ULL << grp->slot_shift);
1407			s = qfq_calc_state(q, grp);
1408			__set_bit(grp->index, &q->bitmaps[s]);
1409		}
1410	}
1411}
1412
1413static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1414{
1415	struct qfq_sched *q = qdisc_priv(sch);
1416	struct qfq_class *cl = (struct qfq_class *)arg;
1417
1418	qfq_deactivate_class(q, cl);
1419}
1420
1421static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
1422			  struct netlink_ext_ack *extack)
1423{
1424	struct qfq_sched *q = qdisc_priv(sch);
1425	struct qfq_group *grp;
1426	int i, j, err;
1427	u32 max_cl_shift, maxbudg_shift, max_classes;
1428
1429	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1430	if (err)
1431		return err;
1432
1433	err = qdisc_class_hash_init(&q->clhash);
1434	if (err < 0)
1435		return err;
1436
1437	max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
1438			    QFQ_MAX_AGG_CLASSES);
1439	/* max_cl_shift = floor(log_2(max_classes)) */
1440	max_cl_shift = __fls(max_classes);
1441	q->max_agg_classes = 1<<max_cl_shift;
1442
1443	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
1444	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
1445	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
1446
1447	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1448		grp = &q->groups[i];
1449		grp->index = i;
1450		grp->slot_shift = q->min_slot_shift + i;
1451		for (j = 0; j < QFQ_MAX_SLOTS; j++)
1452			INIT_HLIST_HEAD(&grp->slots[j]);
1453	}
1454
1455	INIT_HLIST_HEAD(&q->nonfull_aggs);
1456
1457	return 0;
1458}
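/*
 * Worked example (editor's addition, taking QFQ_MAX_AGG_CLASSES == 8
 * as the "2 + 8 * 128" bound earlier in this file suggests): with
 * tx_queue_len == 1000, max_classes == 8, so max_cl_shift == 3,
 * maxbudg_shift == QFQ_MTU_SHIFT + 3 == 14 and min_slot_shift ==
 * FRAC_BITS + 14 - QFQ_MAX_INDEX == 25; group i then uses
 * slot_shift == 25 + i.
 */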
1459
1460static void qfq_reset_qdisc(struct Qdisc *sch)
1461{
1462	struct qfq_sched *q = qdisc_priv(sch);
1463	struct qfq_class *cl;
1464	unsigned int i;
1465
1466	for (i = 0; i < q->clhash.hashsize; i++) {
1467		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1468			if (cl->qdisc->q.qlen > 0)
1469				qfq_deactivate_class(q, cl);
1470
1471			qdisc_reset(cl->qdisc);
1472		}
1473	}
1474}
1475
1476static void qfq_destroy_qdisc(struct Qdisc *sch)
1477{
1478	struct qfq_sched *q = qdisc_priv(sch);
1479	struct qfq_class *cl;
1480	struct hlist_node *next;
1481	unsigned int i;
1482
1483	tcf_block_put(q->block);
1484
1485	for (i = 0; i < q->clhash.hashsize; i++) {
1486		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1487					  common.hnode) {
1488			qfq_destroy_class(sch, cl);
1489		}
1490	}
1491	qdisc_class_hash_destroy(&q->clhash);
1492}
1493
1494static const struct Qdisc_class_ops qfq_class_ops = {
1495	.change		= qfq_change_class,
1496	.delete		= qfq_delete_class,
1497	.find		= qfq_search_class,
1498	.tcf_block	= qfq_tcf_block,
1499	.bind_tcf	= qfq_bind_tcf,
1500	.unbind_tcf	= qfq_unbind_tcf,
1501	.graft		= qfq_graft_class,
1502	.leaf		= qfq_class_leaf,
1503	.qlen_notify	= qfq_qlen_notify,
1504	.dump		= qfq_dump_class,
1505	.dump_stats	= qfq_dump_class_stats,
1506	.walk		= qfq_walk,
1507};
1508
1509static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1510	.cl_ops		= &qfq_class_ops,
1511	.id		= "qfq",
1512	.priv_size	= sizeof(struct qfq_sched),
1513	.enqueue	= qfq_enqueue,
1514	.dequeue	= qfq_dequeue,
1515	.peek		= qdisc_peek_dequeued,
1516	.init		= qfq_init_qdisc,
1517	.reset		= qfq_reset_qdisc,
1518	.destroy	= qfq_destroy_qdisc,
1519	.owner		= THIS_MODULE,
1520};
1521MODULE_ALIAS_NET_SCH("qfq");
1522
1523static int __init qfq_init(void)
1524{
1525	return register_qdisc(&qfq_qdisc_ops);
1526}
1527
1528static void __exit qfq_exit(void)
1529{
1530	unregister_qdisc(&qfq_qdisc_ops);
1531}
1532
1533module_init(qfq_init);
1534module_exit(qfq_exit);
1535MODULE_LICENSE("GPL");
1536MODULE_DESCRIPTION("Quick Fair Queueing Plus qdisc");