v3.5.6
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/flow_keys.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
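
/* Worked example (editorial note, not part of the original file):
 * a 32-bit hash such as 0xDEADBEEF is consumed 4 bits at a time, low
 * nibble first, giving per-level bucket indexes
 *   0xF, 0xE, 0xE, 0xB, 0xD, 0xA, 0xE, 0xD
 * so each packet touches exactly one bucket in each of the 8 levels.
 */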

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
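
/* Editorial example: with SFB_MAX_PROB == 0xFFFF (from pkt_sched.h),
 * prob_plus(0xFFF0, 0x0100) saturates to 0xFFFF and
 * prob_minus(0x0010, 0x0100) clamps to 0, so p_mark can never wrap.
 */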

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}


static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = net_random();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
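
/* Editorial worked example: with penalty_rate = 10 and penalty_burst = 20,
 * an inelastic flow that has been idle for 3*HZ jiffies refills
 * min(3 * 10, 20) = 20 tokens, then passes one packet per token until the
 * bucket drains; once empty, sfb_rate_limit() returns true and the packet
 * is dropped and counted as a penaltydrop.
 */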

static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{

	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct flow_keys keys;

	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, q, &ret, &salt))
			goto other_drop;
		keys.src = salt;
		keys.dst = 0;
		keys.ports = 0;
	} else {
		skb_flow_dissect(skb, &keys);
	}

	slot = q->slot;

	sfbhash = jhash_3words((__force u32)keys.dst,
			       (__force u32)keys.src,
			       (__force u32)keys.ports,
			       q->bins[slot].perturbation);
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = jhash_3words((__force u32)keys.dst,
					       (__force u32)keys.src,
					       (__force u32)keys.ports,
					       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			sch->qstats.overlimits++;
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = net_random() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}
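
	/* Editorial example: probabilities are Q0.16, so with p_min = 0xC000
	 * (0.75) a packet is "hit" with probability 0.75; because p_min
	 * exceeds SFB_MAX_PROB / 2, it is dropped outright when
	 * r < (0xC000 - 0x8000) * 2 = 0x8000 (overall probability 0.5) and
	 * only ECN-marked in the remaining 0.25.
	 */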

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		sch->qstats.drops++;
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
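
/* Editorial note: with SFB_MAX_PROB == 0xFFFF these defaults evaluate to
 * increment = 66035 / 1000 = 66 (about 0.1% of full scale) and
 * decrement = 68535 / 6000 = 11 (about 0.017%), i.e. p_mark rises roughly
 * six times faster under overflow than it decays while a bin sits empty.
 */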

static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}


static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.get		=	sfb_get,
	.put		=	sfb_put,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_chain	=	sfb_find_tcf,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_put,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
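
The Blue update rule this file implements is simple: a bin's marking probability p_mark rises by d1 each time its virtual queue overflows and falls by d2 each time it idles. The stand-alone sketch below is an editorial illustration, not part of sch_sfb.c; it replays the saturating Q0.16 helpers from above in user space, and the traffic pattern (2000 "overloaded" steps followed by 2000 "idle" steps) and print cadence are arbitrary assumptions.

#include <stdio.h>
#include <stdint.h>

#define SFB_MAX_PROB 0xFFFF	/* Q0.16 full scale, as in pkt_sched.h */

/* Saturating Q0.16 helpers mirroring prob_plus()/prob_minus() above. */
static uint32_t prob_plus(uint32_t p1, uint32_t p2)
{
	uint32_t res = p1 + p2;
	return res < SFB_MAX_PROB ? res : SFB_MAX_PROB;
}

static uint32_t prob_minus(uint32_t p1, uint32_t p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

int main(void)
{
	uint32_t p_mark = 0;
	uint32_t increment = (SFB_MAX_PROB + 500) / 1000;	/* d1, ~0.1% */
	uint32_t decrement = (SFB_MAX_PROB + 3000) / 6000;	/* d2 */
	int t;

	for (t = 0; t < 4000; t++) {
		if (t < 2000)
			p_mark = prob_plus(p_mark, increment);	/* bin full */
		else
			p_mark = prob_minus(p_mark, decrement);	/* bin empty */
		if (t % 500 == 499)
			printf("t=%4d  p_mark=%5u (%.3f)\n",
			       t, p_mark, p_mark / 65536.0);
	}
	return 0;
}

Under sustained overload p_mark saturates at 0xFFFF (which sfb_enqueue treats as "inelastic flow" and hands to the penalty rate limiter); once the bin empties, it decays back toward 0 at the slower d2 rate.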
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}


static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{

	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}


	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
			    &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};

static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}


static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.find		=	sfb_find,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_block	=	sfb_tcf_block,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_unbind,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
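
Between the two versions shown here, the per-slot perturbation moved from a jhash seed drawn with net_random() to a full siphash_key_t filled by get_random_bytes(), while the moving-hash double-buffering scheme itself is unchanged. The sketch below is an editorial illustration of that scheme using a deliberately simple stand-in keyed hash (toy_hash and the key values are assumptions, not the kernel's siphash): during the warmup window a flow is accounted under both keys, and after sfb_swap_slot() the retired slot is re-keyed and its bin state abandoned.

#include <stdio.h>
#include <stdint.h>

/* Stand-in keyed hash -- NOT siphash, just enough to show re-keying. */
static uint32_t toy_hash(uint32_t flow, uint64_t key)
{
	uint64_t v = (uint64_t)flow ^ key;
	v *= 0x9E3779B97F4A7C15ULL;	/* Fibonacci-hashing constant */
	return (uint32_t)(v >> 32);
}

int main(void)
{
	uint64_t key[2] = { 0x1111111111111111ULL, 0x2222222222222222ULL };
	int slot = 0;
	uint32_t flow = 0xC0A80001;	/* stand-in for a hashed 5-tuple */

	/* Steady state: only the active slot's hash is computed. */
	printf("active bins:  %08x\n", toy_hash(flow, key[slot]));

	/* Warmup window: packets are accounted in both slots... */
	printf("warmup, both: %08x / %08x\n",
	       toy_hash(flow, key[slot]), toy_hash(flow, key[slot ^ 1]));

	/* ...then the equivalent of sfb_swap_slot(): re-key the old
	 * slot (sfb_init_perturbation()) and flip the active index. */
	key[slot] = 0x3333333333333333ULL;
	slot ^= 1;
	printf("after swap:   %08x\n", toy_hash(flow, key[slot]));
	return 0;
}

Because the new slot's bins were already warmed up under the new key, flows keep accurate state across the swap, while an attacker who learned the old hash mapping loses it at every rehash_interval.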