v3.5.6
 
  1/*
  2 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
  3 *
  4 *		This program is free software; you can redistribute it and/or
  5 *		modify it under the terms of the GNU General Public License
  6 *		as published by the Free Software Foundation; either version
  7 *		2 of the License, or (at your option) any later version.
  8 *
  9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 10 */
 11
 12#include <linux/module.h>
 13#include <linux/types.h>
 14#include <linux/kernel.h>
 15#include <linux/jiffies.h>
 16#include <linux/string.h>
 17#include <linux/in.h>
 18#include <linux/errno.h>
 19#include <linux/init.h>
 20#include <linux/skbuff.h>
 21#include <linux/jhash.h>
 22#include <linux/slab.h>
 23#include <linux/vmalloc.h>
 24#include <net/netlink.h>
 25#include <net/pkt_sched.h>
 26#include <net/flow_keys.h>
 27#include <net/red.h>
 28
 29
 30/*	Stochastic Fairness Queuing algorithm.
 31	=======================================
 32
 33	Source:
 34	Paul E. McKenney "Stochastic Fairness Queuing",
 35	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.
 36
 37	Paul E. McKenney "Stochastic Fairness Queuing",
 38	"Interworking: Research and Experience", v.2, 1991, p.113-131.
 39
 40
 41	See also:
 42	M. Shreedhar and George Varghese "Efficient Fair
 43	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.
 44
 45
 46	This is not the thing that is usually called (W)FQ nowadays.
 47	It does not use any timestamp mechanism, but instead
 48	processes queues in round-robin order.
 49
 50	ADVANTAGE:
 51
 52	- It is very cheap. Both CPU and memory requirements are minimal.
 53
 54	DRAWBACKS:
 55
 56	- "Stochastic" -> It is not 100% fair.
 57	When hash collisions occur, several flows are considered as one.
 58
 59	- "Round-robin" -> It introduces larger delays than virtual clock
 60	based schemes, and should not be used for isolating interactive
 61	traffic	from non-interactive. It means, that this scheduler
 62	should be used as leaf of CBQ or P3, which put interactive traffic
 63	to higher priority band.
 64
 65	We still need true WFQ for top level CSZ, but using WFQ
 66	for the best effort traffic is absolutely pointless:
 67	SFQ is superior for this purpose.
 68
 69	IMPLEMENTATION:
 70	This implementation limits :
 71	- maximal queue length per flow to 127 packets.
 72	- max mtu to 2^18-1;
 73	- max 65408 flows,
 74	- number of hash buckets to 65536.
 75
 76	It is easy to increase these values, but not in flight.  */
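/* Worked example of the round-robin service implemented below: with a
 * quantum of e.g. 1514 bytes, a backlogged flow may send a 1000-byte and
 * then a 700-byte packet in one round (credit 1514 -> 514 -> -186, kept
 * internally scaled down by 8, see SFQ_ALLOT_SHIFT); once its credit is
 * <= 0 the flow becomes the new tail of the circular slot list and is
 * topped up by another quantum before it is served again, so every
 * backlogged flow averages one quantum of bytes per round.
 */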
 77
 78#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
 79#define SFQ_DEFAULT_FLOWS	128
 80#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
 81#define SFQ_EMPTY_SLOT		0xffff
 82#define SFQ_DEFAULT_HASH_DIVISOR 1024
 83
 84/* We use 16 bits to store allot, and want to handle packets up to 64K
 85 * Scale allot by 8 (1<<3) so that no overflow occurs.
 86 */
 87#define SFQ_ALLOT_SHIFT		3
 88#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
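/* Example: a maximal 64KB packet costs SFQ_ALLOT_SIZE(65535) =
 * DIV_ROUND_UP(65535, 8) = 8192 credit units, which fits comfortably in
 * the signed 16-bit 'allot' field below (range -32768..32767), whereas
 * the raw byte count would not.
 */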
 89
 90/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
 91typedef u16 sfq_index;
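/* Worked out: SFQ_MAX_FLOWS = 0x10000 - 127 - 1 = 65408 flow indexes plus
 * SFQ_MAX_DEPTH + 1 = 128 dep[] anchors gives exactly 65536 values, so a
 * 16-bit sfq_index covers both arrays (see the comment just below).
 */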
 92
 93/*
 94 * We dont use pointers to save space.
 95 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 96 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 97 * are 'pointers' to dep[] array
 98 */
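/* For instance, val = 3 refers to q->slots[3].dep, while
 * val = SFQ_MAX_FLOWS + 2 refers to q->dep[2], the anchor of the list of
 * flows currently holding two packets (see sfq_dep_head() below).
 */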
 99struct sfq_head {
100	sfq_index	next;
101	sfq_index	prev;
102};
103
104struct sfq_slot {
105	struct sk_buff	*skblist_next;
106	struct sk_buff	*skblist_prev;
107	sfq_index	qlen; /* number of skbs in skblist */
108	sfq_index	next; /* next slot in sfq RR chain */
109	struct sfq_head dep; /* anchor in dep[] chains */
110	unsigned short	hash; /* hash value (index in ht[]) */
111	short		allot; /* credit for this slot */
112
113	unsigned int    backlog;
114	struct red_vars vars;
115};
116
117struct sfq_sched_data {
118/* frequently used fields */
119	int		limit;		/* limit of total number of packets in this qdisc */
120	unsigned int	divisor;	/* number of slots in hash table */
121	u8		headdrop;
122	u8		maxdepth;	/* limit of packets per flow */
123
124	u32		perturbation;
125	u8		cur_depth;	/* depth of longest slot */
126	u8		flags;
127	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
128	struct tcf_proto *filter_list;
129	sfq_index	*ht;		/* Hash table ('divisor' slots) */
130	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */
131
132	struct red_parms *red_parms;
133	struct tc_sfqred_stats stats;
134	struct sfq_slot *tail;		/* current slot in round */
135
136	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
137					/* Linked lists of slots, indexed by depth
138					 * dep[0] : list of unused flows
139					 * dep[1] : list of flows with 1 packet
140					 * dep[X] : list of flows with X packets
141					 */
142
143	unsigned int	maxflows;	/* number of flows in flows array */
144	int		perturb_period;
145	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
146	struct timer_list perturb_timer;
147};
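/* Example use of dep[] and cur_depth: when the qdisc is full, sfq_drop()
 * picks q->dep[q->cur_depth].next, i.e. one of the currently longest
 * flows, and drops a packet from it, so a single heavy flow cannot starve
 * the others.
 */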
148
149/*
150 * sfq_head are either in a sfq_slot or in dep[] array
151 */
152static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
153{
154	if (val < SFQ_MAX_FLOWS)
155		return &q->slots[val].dep;
156	return &q->dep[val - SFQ_MAX_FLOWS];
157}
158
159/*
160 * In order to be able to quickly rehash our queue when timer changes
161 * q->perturbation, we store flow_keys in skb->cb[]
162 */
163struct sfq_skb_cb {
164       struct flow_keys        keys;
165};
166
167static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
168{
169	qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
170	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
171}
172
173static unsigned int sfq_hash(const struct sfq_sched_data *q,
174			     const struct sk_buff *skb)
175{
176	const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
177	unsigned int hash;
178
179	hash = jhash_3words((__force u32)keys->dst,
180			    (__force u32)keys->src ^ keys->ip_proto,
181			    (__force u32)keys->ports, q->perturbation);
182	return hash & (q->divisor - 1);
183}
184
185static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
186				 int *qerr)
187{
188	struct sfq_sched_data *q = qdisc_priv(sch);
189	struct tcf_result res;
190	int result;
191
192	if (TC_H_MAJ(skb->priority) == sch->handle &&
193	    TC_H_MIN(skb->priority) > 0 &&
194	    TC_H_MIN(skb->priority) <= q->divisor)
195		return TC_H_MIN(skb->priority);
196
197	if (!q->filter_list) {
198		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
199		return sfq_hash(q, skb) + 1;
200	}
201
202	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
203	result = tc_classify(skb, q->filter_list, &res);
204	if (result >= 0) {
205#ifdef CONFIG_NET_CLS_ACT
206		switch (result) {
207		case TC_ACT_STOLEN:
208		case TC_ACT_QUEUED:
209			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
210		case TC_ACT_SHOT:
211			return 0;
212		}
213#endif
214		if (TC_H_MIN(res.classid) <= q->divisor)
215			return TC_H_MIN(res.classid);
216	}
217	return 0;
218}
219
220/*
221 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
222 */
223static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
224{
225	sfq_index p, n;
226	struct sfq_slot *slot = &q->slots[x];
227	int qlen = slot->qlen;
228
229	p = qlen + SFQ_MAX_FLOWS;
230	n = q->dep[qlen].next;
231
232	slot->dep.next = n;
233	slot->dep.prev = p;
234
235	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
236	sfq_dep_head(q, n)->prev = x;
237}
238
239#define sfq_unlink(q, x, n, p)			\
240	n = q->slots[x].dep.next;		\
241	p = q->slots[x].dep.prev;		\
242	sfq_dep_head(q, p)->next = n;		\
243	sfq_dep_head(q, n)->prev = p
244
245
246static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
247{
248	sfq_index p, n;
249	int d;
250
251	sfq_unlink(q, x, n, p);
252
253	d = q->slots[x].qlen--;
254	if (n == p && q->cur_depth == d)
255		q->cur_depth--;
256	sfq_link(q, x);
257}
258
259static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
260{
261	sfq_index p, n;
262	int d;
263
264	sfq_unlink(q, x, n, p);
265
266	d = ++q->slots[x].qlen;
267	if (q->cur_depth < d)
268		q->cur_depth = d;
269	sfq_link(q, x);
270}
271
272/* helper functions : might be changed when/if skb use a standard list_head */
273
274/* remove one skb from tail of slot queue */
275static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
276{
277	struct sk_buff *skb = slot->skblist_prev;
278
279	slot->skblist_prev = skb->prev;
280	skb->prev->next = (struct sk_buff *)slot;
281	skb->next = skb->prev = NULL;
282	return skb;
283}
284
285/* remove one skb from head of slot queue */
286static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
287{
288	struct sk_buff *skb = slot->skblist_next;
289
290	slot->skblist_next = skb->next;
291	skb->next->prev = (struct sk_buff *)slot;
292	skb->next = skb->prev = NULL;
293	return skb;
294}
295
296static inline void slot_queue_init(struct sfq_slot *slot)
297{
298	memset(slot, 0, sizeof(*slot));
299	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
300}
301
302/* add skb to slot queue (tail add) */
303static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
304{
305	skb->prev = slot->skblist_prev;
306	skb->next = (struct sk_buff *)slot;
307	slot->skblist_prev->next = skb;
308	slot->skblist_prev = skb;
309}
310
311#define	slot_queue_walk(slot, skb)		\
312	for (skb = slot->skblist_next;		\
313	     skb != (struct sk_buff *)slot;	\
314	     skb = skb->next)
315
316static unsigned int sfq_drop(struct Qdisc *sch)
317{
318	struct sfq_sched_data *q = qdisc_priv(sch);
319	sfq_index x, d = q->cur_depth;
320	struct sk_buff *skb;
321	unsigned int len;
322	struct sfq_slot *slot;
323
324	/* Queue is full! Find the longest slot and drop tail packet from it */
325	if (d > 1) {
326		x = q->dep[d].next;
327		slot = &q->slots[x];
328drop:
329		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
330		len = qdisc_pkt_len(skb);
331		slot->backlog -= len;
332		sfq_dec(q, x);
333		kfree_skb(skb);
334		sch->q.qlen--;
335		sch->qstats.drops++;
336		sch->qstats.backlog -= len;
337		return len;
338	}
339
340	if (d == 1) {
341		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
342		x = q->tail->next;
343		slot = &q->slots[x];
344		q->tail->next = slot->next;
345		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
346		goto drop;
347	}
348
349	return 0;
350}
351
352/* Is ECN parameter configured */
353static int sfq_prob_mark(const struct sfq_sched_data *q)
354{
355	return q->flags & TC_RED_ECN;
356}
357
358/* Should packets over max threshold just be marked */
359static int sfq_hard_mark(const struct sfq_sched_data *q)
360{
361	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
362}
363
364static int sfq_headdrop(const struct sfq_sched_data *q)
365{
366	return q->headdrop;
367}
368
369static int
370sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
371{
372	struct sfq_sched_data *q = qdisc_priv(sch);
373	unsigned int hash;
374	sfq_index x, qlen;
375	struct sfq_slot *slot;
376	int uninitialized_var(ret);
377	struct sk_buff *head;
378	int delta;
379
380	hash = sfq_classify(skb, sch, &ret);
381	if (hash == 0) {
382		if (ret & __NET_XMIT_BYPASS)
383			sch->qstats.drops++;
384		kfree_skb(skb);
385		return ret;
386	}
387	hash--;
388
389	x = q->ht[hash];
390	slot = &q->slots[x];
391	if (x == SFQ_EMPTY_SLOT) {
392		x = q->dep[0].next; /* get a free slot */
393		if (x >= SFQ_MAX_FLOWS)
394			return qdisc_drop(skb, sch);
395		q->ht[hash] = x;
396		slot = &q->slots[x];
397		slot->hash = hash;
398		slot->backlog = 0; /* should already be 0 anyway... */
399		red_set_vars(&slot->vars);
400		goto enqueue;
401	}
402	if (q->red_parms) {
403		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
404							&slot->vars,
405							slot->backlog);
406		switch (red_action(q->red_parms,
407				   &slot->vars,
408				   slot->vars.qavg)) {
409		case RED_DONT_MARK:
410			break;
411
412		case RED_PROB_MARK:
413			sch->qstats.overlimits++;
414			if (sfq_prob_mark(q)) {
415				/* We know we have at least one packet in queue */
416				if (sfq_headdrop(q) &&
417				    INET_ECN_set_ce(slot->skblist_next)) {
418					q->stats.prob_mark_head++;
419					break;
420				}
421				if (INET_ECN_set_ce(skb)) {
422					q->stats.prob_mark++;
423					break;
424				}
425			}
426			q->stats.prob_drop++;
427			goto congestion_drop;
428
429		case RED_HARD_MARK:
430			sch->qstats.overlimits++;
431			if (sfq_hard_mark(q)) {
432				/* We know we have at least one packet in queue */
433				if (sfq_headdrop(q) &&
434				    INET_ECN_set_ce(slot->skblist_next)) {
435					q->stats.forced_mark_head++;
436					break;
437				}
438				if (INET_ECN_set_ce(skb)) {
439					q->stats.forced_mark++;
440					break;
441				}
442			}
443			q->stats.forced_drop++;
444			goto congestion_drop;
445		}
446	}
447
448	if (slot->qlen >= q->maxdepth) {
449congestion_drop:
450		if (!sfq_headdrop(q))
451			return qdisc_drop(skb, sch);
452
453		/* We know we have at least one packet in queue */
454		head = slot_dequeue_head(slot);
455		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
456		sch->qstats.backlog -= delta;
457		slot->backlog -= delta;
458		qdisc_drop(head, sch);
459
460		slot_queue_add(slot, skb);
461		return NET_XMIT_CN;
462	}
463
464enqueue:
465	sch->qstats.backlog += qdisc_pkt_len(skb);
466	slot->backlog += qdisc_pkt_len(skb);
467	slot_queue_add(slot, skb);
468	sfq_inc(q, x);
469	if (slot->qlen == 1) {		/* The flow is new */
470		if (q->tail == NULL) {	/* It is the first flow */
471			slot->next = x;
472		} else {
473			slot->next = q->tail->next;
474			q->tail->next = x;
475		}
476		/* We put this flow at the end of our flow list.
477		 * This might sound unfair for a new flow to wait after old ones,
478		 * but we could endup servicing new flows only, and freeze old ones.
479		 */
480		q->tail = slot;
481		/* We could use a bigger initial quantum for new flows */
482		slot->allot = q->scaled_quantum;
483	}
484	if (++sch->q.qlen <= q->limit)
485		return NET_XMIT_SUCCESS;
486
487	qlen = slot->qlen;
488	sfq_drop(sch);
489	/* Return Congestion Notification only if we dropped a packet
490	 * from this flow.
491	 */
492	if (qlen != slot->qlen)
493		return NET_XMIT_CN;
494
495	/* As we dropped a packet, better let upper stack know this */
496	qdisc_tree_decrease_qlen(sch, 1);
497	return NET_XMIT_SUCCESS;
498}
499
500static struct sk_buff *
501sfq_dequeue(struct Qdisc *sch)
502{
503	struct sfq_sched_data *q = qdisc_priv(sch);
504	struct sk_buff *skb;
505	sfq_index a, next_a;
506	struct sfq_slot *slot;
507
508	/* No active slots */
509	if (q->tail == NULL)
510		return NULL;
511
512next_slot:
513	a = q->tail->next;
514	slot = &q->slots[a];
515	if (slot->allot <= 0) {
516		q->tail = slot;
517		slot->allot += q->scaled_quantum;
518		goto next_slot;
519	}
520	skb = slot_dequeue_head(slot);
521	sfq_dec(q, a);
522	qdisc_bstats_update(sch, skb);
523	sch->q.qlen--;
524	sch->qstats.backlog -= qdisc_pkt_len(skb);
525	slot->backlog -= qdisc_pkt_len(skb);
526	/* Is the slot empty? */
527	if (slot->qlen == 0) {
528		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
529		next_a = slot->next;
530		if (a == next_a) {
531			q->tail = NULL; /* no more active slots */
532			return skb;
533		}
534		q->tail->next = next_a;
535	} else {
536		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
537	}
538	return skb;
539}
540
541static void
542sfq_reset(struct Qdisc *sch)
543{
544	struct sk_buff *skb;
545
546	while ((skb = sfq_dequeue(sch)) != NULL)
547		kfree_skb(skb);
548}
549
550/*
551 * When q->perturbation is changed, we rehash all queued skbs
552 * to avoid OOO (Out Of Order) effects.
553 * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
554 * counters.
555 */
556static void sfq_rehash(struct Qdisc *sch)
557{
558	struct sfq_sched_data *q = qdisc_priv(sch);
559	struct sk_buff *skb;
560	int i;
561	struct sfq_slot *slot;
562	struct sk_buff_head list;
563	int dropped = 0;
564
565	__skb_queue_head_init(&list);
566
567	for (i = 0; i < q->maxflows; i++) {
568		slot = &q->slots[i];
569		if (!slot->qlen)
570			continue;
571		while (slot->qlen) {
572			skb = slot_dequeue_head(slot);
573			sfq_dec(q, i);
574			__skb_queue_tail(&list, skb);
575		}
576		slot->backlog = 0;
577		red_set_vars(&slot->vars);
578		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
579	}
580	q->tail = NULL;
581
582	while ((skb = __skb_dequeue(&list)) != NULL) {
583		unsigned int hash = sfq_hash(q, skb);
584		sfq_index x = q->ht[hash];
585
586		slot = &q->slots[x];
587		if (x == SFQ_EMPTY_SLOT) {
588			x = q->dep[0].next; /* get a free slot */
589			if (x >= SFQ_MAX_FLOWS) {
590drop:				sch->qstats.backlog -= qdisc_pkt_len(skb);
591				kfree_skb(skb);
592				dropped++;
593				continue;
594			}
595			q->ht[hash] = x;
596			slot = &q->slots[x];
597			slot->hash = hash;
598		}
599		if (slot->qlen >= q->maxdepth)
600			goto drop;
601		slot_queue_add(slot, skb);
602		if (q->red_parms)
603			slot->vars.qavg = red_calc_qavg(q->red_parms,
604							&slot->vars,
605							slot->backlog);
606		slot->backlog += qdisc_pkt_len(skb);
607		sfq_inc(q, x);
608		if (slot->qlen == 1) {		/* The flow is new */
609			if (q->tail == NULL) {	/* It is the first flow */
610				slot->next = x;
611			} else {
612				slot->next = q->tail->next;
613				q->tail->next = x;
614			}
615			q->tail = slot;
616			slot->allot = q->scaled_quantum;
617		}
618	}
619	sch->q.qlen -= dropped;
620	qdisc_tree_decrease_qlen(sch, dropped);
621}
622
623static void sfq_perturbation(unsigned long arg)
624{
625	struct Qdisc *sch = (struct Qdisc *)arg;
626	struct sfq_sched_data *q = qdisc_priv(sch);
627	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
628
629	spin_lock(root_lock);
630	q->perturbation = net_random();
631	if (!q->filter_list && q->tail)
632		sfq_rehash(sch);
633	spin_unlock(root_lock);
634
635	if (q->perturb_period)
636		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
637}
638
639static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
640{
641	struct sfq_sched_data *q = qdisc_priv(sch);
642	struct tc_sfq_qopt *ctl = nla_data(opt);
643	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
644	unsigned int qlen;
645	struct red_parms *p = NULL;
646
647	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
648		return -EINVAL;
649	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
650		ctl_v1 = nla_data(opt);
651	if (ctl->divisor &&
652	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
653		return -EINVAL;
654	if (ctl_v1 && ctl_v1->qth_min) {
655		p = kmalloc(sizeof(*p), GFP_KERNEL);
656		if (!p)
657			return -ENOMEM;
658	}
659	sch_tree_lock(sch);
660	if (ctl->quantum) {
661		q->quantum = ctl->quantum;
662		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
663	}
664	q->perturb_period = ctl->perturb_period * HZ;
665	if (ctl->flows)
666		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
667	if (ctl->divisor) {
668		q->divisor = ctl->divisor;
669		q->maxflows = min_t(u32, q->maxflows, q->divisor);
670	}
671	if (ctl_v1) {
672		if (ctl_v1->depth)
673			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
674		if (p) {
675			swap(q->red_parms, p);
676			red_set_parms(q->red_parms,
677				      ctl_v1->qth_min, ctl_v1->qth_max,
678				      ctl_v1->Wlog,
679				      ctl_v1->Plog, ctl_v1->Scell_log,
680				      NULL,
681				      ctl_v1->max_P);
682		}
683		q->flags = ctl_v1->flags;
684		q->headdrop = ctl_v1->headdrop;
685	}
686	if (ctl->limit) {
687		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
688		q->maxflows = min_t(u32, q->maxflows, q->limit);
689	}
690
691	qlen = sch->q.qlen;
692	while (sch->q.qlen > q->limit)
693		sfq_drop(sch);
694	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
695
696	del_timer(&q->perturb_timer);
697	if (q->perturb_period) {
698		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
699		q->perturbation = net_random();
700	}
701	sch_tree_unlock(sch);
702	kfree(p);
703	return 0;
704}
705
706static void *sfq_alloc(size_t sz)
707{
708	void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
709
710	if (!ptr)
711		ptr = vmalloc(sz);
712	return ptr;
713}
714
715static void sfq_free(void *addr)
716{
717	if (addr) {
718		if (is_vmalloc_addr(addr))
719			vfree(addr);
720		else
721			kfree(addr);
722	}
723}
724
725static void sfq_destroy(struct Qdisc *sch)
726{
727	struct sfq_sched_data *q = qdisc_priv(sch);
728
729	tcf_destroy_chain(&q->filter_list);
730	q->perturb_period = 0;
731	del_timer_sync(&q->perturb_timer);
732	sfq_free(q->ht);
733	sfq_free(q->slots);
734	kfree(q->red_parms);
735}
736
737static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
738{
739	struct sfq_sched_data *q = qdisc_priv(sch);
740	int i;
741
742	q->perturb_timer.function = sfq_perturbation;
743	q->perturb_timer.data = (unsigned long)sch;
744	init_timer_deferrable(&q->perturb_timer);
745
746	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
747		q->dep[i].next = i + SFQ_MAX_FLOWS;
748		q->dep[i].prev = i + SFQ_MAX_FLOWS;
749	}
750
751	q->limit = SFQ_MAX_DEPTH;
752	q->maxdepth = SFQ_MAX_DEPTH;
753	q->cur_depth = 0;
754	q->tail = NULL;
755	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
756	q->maxflows = SFQ_DEFAULT_FLOWS;
757	q->quantum = psched_mtu(qdisc_dev(sch));
758	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
759	q->perturb_period = 0;
760	q->perturbation = net_random();
761
762	if (opt) {
763		int err = sfq_change(sch, opt);
764		if (err)
765			return err;
766	}
767
768	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
769	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
770	if (!q->ht || !q->slots) {
771		sfq_destroy(sch);
772		return -ENOMEM;
773	}
774	for (i = 0; i < q->divisor; i++)
775		q->ht[i] = SFQ_EMPTY_SLOT;
776
777	for (i = 0; i < q->maxflows; i++) {
778		slot_queue_init(&q->slots[i]);
779		sfq_link(q, i);
780	}
781	if (q->limit >= 1)
782		sch->flags |= TCQ_F_CAN_BYPASS;
783	else
784		sch->flags &= ~TCQ_F_CAN_BYPASS;
785	return 0;
786}
787
788static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
789{
790	struct sfq_sched_data *q = qdisc_priv(sch);
791	unsigned char *b = skb_tail_pointer(skb);
792	struct tc_sfq_qopt_v1 opt;
793	struct red_parms *p = q->red_parms;
794
795	memset(&opt, 0, sizeof(opt));
796	opt.v0.quantum	= q->quantum;
797	opt.v0.perturb_period = q->perturb_period / HZ;
798	opt.v0.limit	= q->limit;
799	opt.v0.divisor	= q->divisor;
800	opt.v0.flows	= q->maxflows;
801	opt.depth	= q->maxdepth;
802	opt.headdrop	= q->headdrop;
803
804	if (p) {
805		opt.qth_min	= p->qth_min >> p->Wlog;
806		opt.qth_max	= p->qth_max >> p->Wlog;
807		opt.Wlog	= p->Wlog;
808		opt.Plog	= p->Plog;
809		opt.Scell_log	= p->Scell_log;
810		opt.max_P	= p->max_P;
811	}
812	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
813	opt.flags	= q->flags;
814
815	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
816		goto nla_put_failure;
817
818	return skb->len;
819
820nla_put_failure:
821	nlmsg_trim(skb, b);
822	return -1;
823}
824
825static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
826{
827	return NULL;
828}
829
830static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
831{
832	return 0;
833}
834
835static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
836			      u32 classid)
837{
838	/* we cannot bypass queue discipline anymore */
839	sch->flags &= ~TCQ_F_CAN_BYPASS;
840	return 0;
841}
842
843static void sfq_put(struct Qdisc *q, unsigned long cl)
844{
845}
846
847static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
848{
849	struct sfq_sched_data *q = qdisc_priv(sch);
850
851	if (cl)
852		return NULL;
853	return &q->filter_list;
854}
855
856static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
857			  struct sk_buff *skb, struct tcmsg *tcm)
858{
859	tcm->tcm_handle |= TC_H_MIN(cl);
860	return 0;
861}
862
863static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
864				struct gnet_dump *d)
865{
866	struct sfq_sched_data *q = qdisc_priv(sch);
867	sfq_index idx = q->ht[cl - 1];
868	struct gnet_stats_queue qs = { 0 };
869	struct tc_sfq_xstats xstats = { 0 };
870
871	if (idx != SFQ_EMPTY_SLOT) {
872		const struct sfq_slot *slot = &q->slots[idx];
873
874		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
875		qs.qlen = slot->qlen;
876		qs.backlog = slot->backlog;
877	}
878	if (gnet_stats_copy_queue(d, &qs) < 0)
879		return -1;
880	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
881}
882
883static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
884{
885	struct sfq_sched_data *q = qdisc_priv(sch);
886	unsigned int i;
887
888	if (arg->stop)
889		return;
890
891	for (i = 0; i < q->divisor; i++) {
892		if (q->ht[i] == SFQ_EMPTY_SLOT ||
893		    arg->count < arg->skip) {
894			arg->count++;
895			continue;
896		}
897		if (arg->fn(sch, i + 1, arg) < 0) {
898			arg->stop = 1;
899			break;
900		}
901		arg->count++;
902	}
903}
904
905static const struct Qdisc_class_ops sfq_class_ops = {
906	.leaf		=	sfq_leaf,
907	.get		=	sfq_get,
908	.put		=	sfq_put,
909	.tcf_chain	=	sfq_find_tcf,
910	.bind_tcf	=	sfq_bind,
911	.unbind_tcf	=	sfq_put,
912	.dump		=	sfq_dump_class,
913	.dump_stats	=	sfq_dump_class_stats,
914	.walk		=	sfq_walk,
915};
916
917static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
918	.cl_ops		=	&sfq_class_ops,
919	.id		=	"sfq",
920	.priv_size	=	sizeof(struct sfq_sched_data),
921	.enqueue	=	sfq_enqueue,
922	.dequeue	=	sfq_dequeue,
923	.peek		=	qdisc_peek_dequeued,
924	.drop		=	sfq_drop,
925	.init		=	sfq_init,
926	.reset		=	sfq_reset,
927	.destroy	=	sfq_destroy,
928	.change		=	NULL,
929	.dump		=	sfq_dump,
930	.owner		=	THIS_MODULE,
931};
932
933static int __init sfq_module_init(void)
934{
935	return register_qdisc(&sfq_qdisc_ops);
936}
937static void __exit sfq_module_exit(void)
938{
939	unregister_qdisc(&sfq_qdisc_ops);
940}
941module_init(sfq_module_init)
942module_exit(sfq_module_exit)
943MODULE_LICENSE("GPL");
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
  4 *
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/types.h>
 10#include <linux/kernel.h>
 11#include <linux/jiffies.h>
 12#include <linux/string.h>
 13#include <linux/in.h>
 14#include <linux/errno.h>
 15#include <linux/init.h>
 16#include <linux/skbuff.h>
 17#include <linux/siphash.h>
 18#include <linux/slab.h>
 19#include <linux/vmalloc.h>
 20#include <net/netlink.h>
 21#include <net/pkt_sched.h>
 22#include <net/pkt_cls.h>
 23#include <net/red.h>
 24
 25
 26/*	Stochastic Fairness Queuing algorithm.
 27	=======================================
 28
 29	Source:
 30	Paul E. McKenney "Stochastic Fairness Queuing",
 31	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.
 32
 33	Paul E. McKenney "Stochastic Fairness Queuing",
 34	"Interworking: Research and Experience", v.2, 1991, p.113-131.
 35
 36
 37	See also:
 38	M. Shreedhar and George Varghese "Efficient Fair
 39	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.
 40
 41
 42	This is not the thing that is usually called (W)FQ nowadays.
 43	It does not use any timestamp mechanism, but instead
 44	processes queues in round-robin order.
 45
 46	ADVANTAGE:
 47
 48	- It is very cheap. Both CPU and memory requirements are minimal.
 49
 50	DRAWBACKS:
 51
 52	- "Stochastic" -> It is not 100% fair.
 53	When hash collisions occur, several flows are considered as one.
 54
 55	- "Round-robin" -> It introduces larger delays than virtual clock
 56	based schemes, and should not be used for isolating interactive
 57	traffic	from non-interactive. It means, that this scheduler
 58	should be used as leaf of CBQ or P3, which put interactive traffic
 59	to higher priority band.
 60
 61	We still need true WFQ for top level CSZ, but using WFQ
 62	for the best effort traffic is absolutely pointless:
 63	SFQ is superior for this purpose.
 64
 65	IMPLEMENTATION:
 66	This implementation limits :
 67	- maximal queue length per flow to 127 packets.
 68	- max mtu to 2^18-1;
 69	- max 65408 flows,
 70	- number of hash buckets to 65536.
 71
 72	It is easy to increase these values, but not in flight.  */
 73
 74#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
 75#define SFQ_DEFAULT_FLOWS	128
 76#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
 77#define SFQ_EMPTY_SLOT		0xffff
 78#define SFQ_DEFAULT_HASH_DIVISOR 1024
 79
 80/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
 81typedef u16 sfq_index;
 82
 83/*
 84 * We dont use pointers to save space.
 85 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 86 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 87 * are 'pointers' to dep[] array
 88 */
 89struct sfq_head {
 90	sfq_index	next;
 91	sfq_index	prev;
 92};
 93
 94struct sfq_slot {
 95	struct sk_buff	*skblist_next;
 96	struct sk_buff	*skblist_prev;
 97	sfq_index	qlen; /* number of skbs in skblist */
 98	sfq_index	next; /* next slot in sfq RR chain */
 99	struct sfq_head dep; /* anchor in dep[] chains */
100	unsigned short	hash; /* hash value (index in ht[]) */
101	int		allot; /* credit for this slot */
102
103	unsigned int    backlog;
104	struct red_vars vars;
105};
106
107struct sfq_sched_data {
108/* frequently used fields */
109	int		limit;		/* limit of total number of packets in this qdisc */
110	unsigned int	divisor;	/* number of slots in hash table */
111	u8		headdrop;
112	u8		maxdepth;	/* limit of packets per flow */
113
114	siphash_key_t 	perturbation;
115	u8		cur_depth;	/* depth of longest slot */
116	u8		flags;
117	struct tcf_proto __rcu *filter_list;
118	struct tcf_block *block;
119	sfq_index	*ht;		/* Hash table ('divisor' slots) */
120	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */
121
122	struct red_parms *red_parms;
123	struct tc_sfqred_stats stats;
124	struct sfq_slot *tail;		/* current slot in round */
125
126	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
127					/* Linked lists of slots, indexed by depth
128					 * dep[0] : list of unused flows
129					 * dep[1] : list of flows with 1 packet
130					 * dep[X] : list of flows with X packets
131					 */
132
133	unsigned int	maxflows;	/* number of flows in flows array */
134	int		perturb_period;
135	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
136	struct timer_list perturb_timer;
137	struct Qdisc	*sch;
138};
139
140/*
141 * sfq_head are either in a sfq_slot or in dep[] array
142 */
143static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
144{
145	if (val < SFQ_MAX_FLOWS)
146		return &q->slots[val].dep;
147	return &q->dep[val - SFQ_MAX_FLOWS];
148}
149
150static unsigned int sfq_hash(const struct sfq_sched_data *q,
151			     const struct sk_buff *skb)
152{
153	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
154}
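/* q->divisor is validated as a power of two in sfq_change(), so the
 * '& (q->divisor - 1)' above is a cheap modulo: with the default divisor
 * of 1024 it keeps the low 10 bits of the 32-bit flow hash, i.e.
 * bucket = hash % 1024.
 */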
155
156static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
157				 int *qerr)
158{
159	struct sfq_sched_data *q = qdisc_priv(sch);
160	struct tcf_result res;
161	struct tcf_proto *fl;
162	int result;
163
164	if (TC_H_MAJ(skb->priority) == sch->handle &&
165	    TC_H_MIN(skb->priority) > 0 &&
166	    TC_H_MIN(skb->priority) <= q->divisor)
167		return TC_H_MIN(skb->priority);
168
169	fl = rcu_dereference_bh(q->filter_list);
170	if (!fl)
171		return sfq_hash(q, skb) + 1;
172
173	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
174	result = tcf_classify(skb, NULL, fl, &res, false);
175	if (result >= 0) {
176#ifdef CONFIG_NET_CLS_ACT
177		switch (result) {
178		case TC_ACT_STOLEN:
179		case TC_ACT_QUEUED:
180		case TC_ACT_TRAP:
181			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
182			fallthrough;
183		case TC_ACT_SHOT:
184			return 0;
185		}
186#endif
187		if (TC_H_MIN(res.classid) <= q->divisor)
188			return TC_H_MIN(res.classid);
189	}
190	return 0;
191}
192
193/*
194 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
195 */
196static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
197{
198	sfq_index p, n;
199	struct sfq_slot *slot = &q->slots[x];
200	int qlen = slot->qlen;
201
202	p = qlen + SFQ_MAX_FLOWS;
203	n = q->dep[qlen].next;
204
205	slot->dep.next = n;
206	slot->dep.prev = p;
207
208	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
209	sfq_dep_head(q, n)->prev = x;
210}
211
212#define sfq_unlink(q, x, n, p)			\
213	do {					\
214		n = q->slots[x].dep.next;	\
215		p = q->slots[x].dep.prev;	\
216		sfq_dep_head(q, p)->next = n;	\
217		sfq_dep_head(q, n)->prev = p;	\
218	} while (0)
219
220
221static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
222{
223	sfq_index p, n;
224	int d;
225
226	sfq_unlink(q, x, n, p);
227
228	d = q->slots[x].qlen--;
229	if (n == p && q->cur_depth == d)
230		q->cur_depth--;
231	sfq_link(q, x);
232}
233
234static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
235{
236	sfq_index p, n;
237	int d;
238
239	sfq_unlink(q, x, n, p);
240
241	d = ++q->slots[x].qlen;
242	if (q->cur_depth < d)
243		q->cur_depth = d;
244	sfq_link(q, x);
245}
246
247/* helper functions : might be changed when/if skb use a standard list_head */
248
249/* remove one skb from tail of slot queue */
250static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
251{
252	struct sk_buff *skb = slot->skblist_prev;
253
254	slot->skblist_prev = skb->prev;
255	skb->prev->next = (struct sk_buff *)slot;
256	skb->next = skb->prev = NULL;
257	return skb;
258}
259
260/* remove one skb from head of slot queue */
261static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
262{
263	struct sk_buff *skb = slot->skblist_next;
264
265	slot->skblist_next = skb->next;
266	skb->next->prev = (struct sk_buff *)slot;
267	skb->next = skb->prev = NULL;
268	return skb;
269}
270
271static inline void slot_queue_init(struct sfq_slot *slot)
272{
273	memset(slot, 0, sizeof(*slot));
274	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
275}
276
277/* add skb to slot queue (tail add) */
278static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
279{
280	skb->prev = slot->skblist_prev;
281	skb->next = (struct sk_buff *)slot;
282	slot->skblist_prev->next = skb;
283	slot->skblist_prev = skb;
284}
285
286static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
287{
288	struct sfq_sched_data *q = qdisc_priv(sch);
289	sfq_index x, d = q->cur_depth;
290	struct sk_buff *skb;
291	unsigned int len;
292	struct sfq_slot *slot;
293
294	/* Queue is full! Find the longest slot and drop tail packet from it */
295	if (d > 1) {
296		x = q->dep[d].next;
297		slot = &q->slots[x];
298drop:
299		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
300		len = qdisc_pkt_len(skb);
301		slot->backlog -= len;
302		sfq_dec(q, x);
303		sch->q.qlen--;
304		qdisc_qstats_backlog_dec(sch, skb);
305		qdisc_drop(skb, sch, to_free);
306		return len;
307	}
308
309	if (d == 1) {
310		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
311		x = q->tail->next;
312		slot = &q->slots[x];
313		q->tail->next = slot->next;
314		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
315		goto drop;
316	}
317
318	return 0;
319}
320
321/* Is ECN parameter configured */
322static int sfq_prob_mark(const struct sfq_sched_data *q)
323{
324	return q->flags & TC_RED_ECN;
325}
326
327/* Should packets over max threshold just be marked */
328static int sfq_hard_mark(const struct sfq_sched_data *q)
329{
330	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
331}
332
333static int sfq_headdrop(const struct sfq_sched_data *q)
334{
335	return q->headdrop;
336}
337
338static int
339sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
340{
341	struct sfq_sched_data *q = qdisc_priv(sch);
342	unsigned int hash, dropped;
343	sfq_index x, qlen;
344	struct sfq_slot *slot;
345	int ret;
346	struct sk_buff *head;
347	int delta;
348
349	hash = sfq_classify(skb, sch, &ret);
350	if (hash == 0) {
351		if (ret & __NET_XMIT_BYPASS)
352			qdisc_qstats_drop(sch);
353		__qdisc_drop(skb, to_free);
354		return ret;
355	}
356	hash--;
357
358	x = q->ht[hash];
359	slot = &q->slots[x];
360	if (x == SFQ_EMPTY_SLOT) {
361		x = q->dep[0].next; /* get a free slot */
362		if (x >= SFQ_MAX_FLOWS)
363			return qdisc_drop(skb, sch, to_free);
364		q->ht[hash] = x;
365		slot = &q->slots[x];
366		slot->hash = hash;
367		slot->backlog = 0; /* should already be 0 anyway... */
368		red_set_vars(&slot->vars);
369		goto enqueue;
370	}
371	if (q->red_parms) {
372		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
373							&slot->vars,
374							slot->backlog);
375		switch (red_action(q->red_parms,
376				   &slot->vars,
377				   slot->vars.qavg)) {
378		case RED_DONT_MARK:
379			break;
380
381		case RED_PROB_MARK:
382			qdisc_qstats_overlimit(sch);
383			if (sfq_prob_mark(q)) {
384				/* We know we have at least one packet in queue */
385				if (sfq_headdrop(q) &&
386				    INET_ECN_set_ce(slot->skblist_next)) {
387					q->stats.prob_mark_head++;
388					break;
389				}
390				if (INET_ECN_set_ce(skb)) {
391					q->stats.prob_mark++;
392					break;
393				}
394			}
395			q->stats.prob_drop++;
396			goto congestion_drop;
397
398		case RED_HARD_MARK:
399			qdisc_qstats_overlimit(sch);
400			if (sfq_hard_mark(q)) {
401				/* We know we have at least one packet in queue */
402				if (sfq_headdrop(q) &&
403				    INET_ECN_set_ce(slot->skblist_next)) {
404					q->stats.forced_mark_head++;
405					break;
406				}
407				if (INET_ECN_set_ce(skb)) {
408					q->stats.forced_mark++;
409					break;
410				}
411			}
412			q->stats.forced_drop++;
413			goto congestion_drop;
414		}
415	}
416
417	if (slot->qlen >= q->maxdepth) {
418congestion_drop:
419		if (!sfq_headdrop(q))
420			return qdisc_drop(skb, sch, to_free);
421
422		/* We know we have at least one packet in queue */
423		head = slot_dequeue_head(slot);
424		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
425		sch->qstats.backlog -= delta;
426		slot->backlog -= delta;
427		qdisc_drop(head, sch, to_free);
428
429		slot_queue_add(slot, skb);
430		qdisc_tree_reduce_backlog(sch, 0, delta);
431		return NET_XMIT_CN;
432	}
433
434enqueue:
435	qdisc_qstats_backlog_inc(sch, skb);
436	slot->backlog += qdisc_pkt_len(skb);
437	slot_queue_add(slot, skb);
438	sfq_inc(q, x);
439	if (slot->qlen == 1) {		/* The flow is new */
440		if (q->tail == NULL) {	/* It is the first flow */
441			slot->next = x;
442		} else {
443			slot->next = q->tail->next;
444			q->tail->next = x;
445		}
446		/* We put this flow at the end of our flow list.
447		 * This might sound unfair for a new flow to wait after old ones,
448		 * but we could endup servicing new flows only, and freeze old ones.
449		 */
450		q->tail = slot;
451		/* We could use a bigger initial quantum for new flows */
452		slot->allot = q->quantum;
453	}
454	if (++sch->q.qlen <= q->limit)
455		return NET_XMIT_SUCCESS;
456
457	qlen = slot->qlen;
458	dropped = sfq_drop(sch, to_free);
459	/* Return Congestion Notification only if we dropped a packet
460	 * from this flow.
461	 */
462	if (qlen != slot->qlen) {
463		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
464		return NET_XMIT_CN;
465	}
466
467	/* As we dropped a packet, better let upper stack know this */
468	qdisc_tree_reduce_backlog(sch, 1, dropped);
469	return NET_XMIT_SUCCESS;
470}
471
472static struct sk_buff *
473sfq_dequeue(struct Qdisc *sch)
474{
475	struct sfq_sched_data *q = qdisc_priv(sch);
476	struct sk_buff *skb;
477	sfq_index a, next_a;
478	struct sfq_slot *slot;
479
480	/* No active slots */
481	if (q->tail == NULL)
482		return NULL;
483
484next_slot:
485	a = q->tail->next;
486	slot = &q->slots[a];
487	if (slot->allot <= 0) {
488		q->tail = slot;
489		slot->allot += q->quantum;
490		goto next_slot;
491	}
492	skb = slot_dequeue_head(slot);
493	sfq_dec(q, a);
494	qdisc_bstats_update(sch, skb);
495	sch->q.qlen--;
496	qdisc_qstats_backlog_dec(sch, skb);
497	slot->backlog -= qdisc_pkt_len(skb);
498	/* Is the slot empty? */
499	if (slot->qlen == 0) {
500		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
501		next_a = slot->next;
502		if (a == next_a) {
503			q->tail = NULL; /* no more active slots */
504			return skb;
505		}
506		q->tail->next = next_a;
507	} else {
508		slot->allot -= qdisc_pkt_len(skb);
509	}
510	return skb;
511}
512
513static void
514sfq_reset(struct Qdisc *sch)
515{
516	struct sk_buff *skb;
517
518	while ((skb = sfq_dequeue(sch)) != NULL)
519		rtnl_kfree_skbs(skb, skb);
520}
521
522/*
523 * When q->perturbation is changed, we rehash all queued skbs
524 * to avoid OOO (Out Of Order) effects.
525 * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
526 * counters.
527 */
528static void sfq_rehash(struct Qdisc *sch)
529{
530	struct sfq_sched_data *q = qdisc_priv(sch);
531	struct sk_buff *skb;
532	int i;
533	struct sfq_slot *slot;
534	struct sk_buff_head list;
535	int dropped = 0;
536	unsigned int drop_len = 0;
537
538	__skb_queue_head_init(&list);
539
540	for (i = 0; i < q->maxflows; i++) {
541		slot = &q->slots[i];
542		if (!slot->qlen)
543			continue;
544		while (slot->qlen) {
545			skb = slot_dequeue_head(slot);
546			sfq_dec(q, i);
547			__skb_queue_tail(&list, skb);
548		}
549		slot->backlog = 0;
550		red_set_vars(&slot->vars);
551		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
552	}
553	q->tail = NULL;
554
555	while ((skb = __skb_dequeue(&list)) != NULL) {
556		unsigned int hash = sfq_hash(q, skb);
557		sfq_index x = q->ht[hash];
558
559		slot = &q->slots[x];
560		if (x == SFQ_EMPTY_SLOT) {
561			x = q->dep[0].next; /* get a free slot */
562			if (x >= SFQ_MAX_FLOWS) {
563drop:
564				qdisc_qstats_backlog_dec(sch, skb);
565				drop_len += qdisc_pkt_len(skb);
566				kfree_skb(skb);
567				dropped++;
568				continue;
569			}
570			q->ht[hash] = x;
571			slot = &q->slots[x];
572			slot->hash = hash;
573		}
574		if (slot->qlen >= q->maxdepth)
575			goto drop;
576		slot_queue_add(slot, skb);
577		if (q->red_parms)
578			slot->vars.qavg = red_calc_qavg(q->red_parms,
579							&slot->vars,
580							slot->backlog);
581		slot->backlog += qdisc_pkt_len(skb);
582		sfq_inc(q, x);
583		if (slot->qlen == 1) {		/* The flow is new */
584			if (q->tail == NULL) {	/* It is the first flow */
585				slot->next = x;
586			} else {
587				slot->next = q->tail->next;
588				q->tail->next = x;
589			}
590			q->tail = slot;
591			slot->allot = q->quantum;
592		}
593	}
594	sch->q.qlen -= dropped;
595	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
596}
597
598static void sfq_perturbation(struct timer_list *t)
599{
600	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
601	struct Qdisc *sch = q->sch;
602	spinlock_t *root_lock;
603	siphash_key_t nkey;
604	int period;
605
606	get_random_bytes(&nkey, sizeof(nkey));
607	rcu_read_lock();
608	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
609	spin_lock(root_lock);
610	q->perturbation = nkey;
611	if (!q->filter_list && q->tail)
612		sfq_rehash(sch);
613	spin_unlock(root_lock);
614
615	/* q->perturb_period can change under us from
616	 * sfq_change() and sfq_destroy().
617	 */
618	period = READ_ONCE(q->perturb_period);
619	if (period)
620		mod_timer(&q->perturb_timer, jiffies + period);
621	rcu_read_unlock();
622}
623
624static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
625		      struct netlink_ext_ack *extack)
626{
627	struct sfq_sched_data *q = qdisc_priv(sch);
628	struct tc_sfq_qopt *ctl = nla_data(opt);
629	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
630	unsigned int qlen, dropped = 0;
631	struct red_parms *p = NULL;
632	struct sk_buff *to_free = NULL;
633	struct sk_buff *tail = NULL;
634
635	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
636		return -EINVAL;
637	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
638		ctl_v1 = nla_data(opt);
639	if (ctl->divisor &&
640	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
641		return -EINVAL;
642
643	if ((int)ctl->quantum < 0) {
644		NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
645		return -EINVAL;
646	}
647	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
648					ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
649		return -EINVAL;
650	if (ctl_v1 && ctl_v1->qth_min) {
651		p = kmalloc(sizeof(*p), GFP_KERNEL);
652		if (!p)
653			return -ENOMEM;
654	}
655	if (ctl->limit == 1) {
656		NL_SET_ERR_MSG_MOD(extack, "invalid limit");
657		return -EINVAL;
658	}
659	sch_tree_lock(sch);
660	if (ctl->quantum)
661		q->quantum = ctl->quantum;
662	WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
663	if (ctl->flows)
664		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
665	if (ctl->divisor) {
666		q->divisor = ctl->divisor;
667		q->maxflows = min_t(u32, q->maxflows, q->divisor);
668	}
669	if (ctl_v1) {
670		if (ctl_v1->depth)
671			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
672		if (p) {
673			swap(q->red_parms, p);
674			red_set_parms(q->red_parms,
675				      ctl_v1->qth_min, ctl_v1->qth_max,
676				      ctl_v1->Wlog,
677				      ctl_v1->Plog, ctl_v1->Scell_log,
678				      NULL,
679				      ctl_v1->max_P);
680		}
681		q->flags = ctl_v1->flags;
682		q->headdrop = ctl_v1->headdrop;
683	}
684	if (ctl->limit) {
685		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
686		q->maxflows = min_t(u32, q->maxflows, q->limit);
687	}
688
689	qlen = sch->q.qlen;
690	while (sch->q.qlen > q->limit) {
691		dropped += sfq_drop(sch, &to_free);
692		if (!tail)
693			tail = to_free;
694	}
695
696	rtnl_kfree_skbs(to_free, tail);
697	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
698
699	del_timer(&q->perturb_timer);
700	if (q->perturb_period) {
701		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
702		get_random_bytes(&q->perturbation, sizeof(q->perturbation));
703	}
704	sch_tree_unlock(sch);
705	kfree(p);
706	return 0;
707}
708
709static void *sfq_alloc(size_t sz)
710{
711	return  kvmalloc(sz, GFP_KERNEL);
712}
713
714static void sfq_free(void *addr)
715{
716	kvfree(addr);
717}
718
719static void sfq_destroy(struct Qdisc *sch)
720{
721	struct sfq_sched_data *q = qdisc_priv(sch);
722
723	tcf_block_put(q->block);
724	WRITE_ONCE(q->perturb_period, 0);
725	del_timer_sync(&q->perturb_timer);
726	sfq_free(q->ht);
727	sfq_free(q->slots);
728	kfree(q->red_parms);
729}
730
731static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
732		    struct netlink_ext_ack *extack)
733{
734	struct sfq_sched_data *q = qdisc_priv(sch);
735	int i;
736	int err;
737
738	q->sch = sch;
739	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
740
741	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
742	if (err)
743		return err;
744
745	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
746		q->dep[i].next = i + SFQ_MAX_FLOWS;
747		q->dep[i].prev = i + SFQ_MAX_FLOWS;
748	}
749
750	q->limit = SFQ_MAX_DEPTH;
751	q->maxdepth = SFQ_MAX_DEPTH;
752	q->cur_depth = 0;
753	q->tail = NULL;
754	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
755	q->maxflows = SFQ_DEFAULT_FLOWS;
756	q->quantum = psched_mtu(qdisc_dev(sch));
757	q->perturb_period = 0;
758	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
759
760	if (opt) {
761		int err = sfq_change(sch, opt, extack);
762		if (err)
763			return err;
764	}
765
766	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
767	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
768	if (!q->ht || !q->slots) {
769		/* Note: sfq_destroy() will be called by our caller */
770		return -ENOMEM;
771	}
772
773	for (i = 0; i < q->divisor; i++)
774		q->ht[i] = SFQ_EMPTY_SLOT;
775
776	for (i = 0; i < q->maxflows; i++) {
777		slot_queue_init(&q->slots[i]);
778		sfq_link(q, i);
779	}
780	if (q->limit >= 1)
781		sch->flags |= TCQ_F_CAN_BYPASS;
782	else
783		sch->flags &= ~TCQ_F_CAN_BYPASS;
784	return 0;
785}
786
787static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
788{
789	struct sfq_sched_data *q = qdisc_priv(sch);
790	unsigned char *b = skb_tail_pointer(skb);
791	struct tc_sfq_qopt_v1 opt;
792	struct red_parms *p = q->red_parms;
793
794	memset(&opt, 0, sizeof(opt));
795	opt.v0.quantum	= q->quantum;
796	opt.v0.perturb_period = q->perturb_period / HZ;
797	opt.v0.limit	= q->limit;
798	opt.v0.divisor	= q->divisor;
799	opt.v0.flows	= q->maxflows;
800	opt.depth	= q->maxdepth;
801	opt.headdrop	= q->headdrop;
802
803	if (p) {
804		opt.qth_min	= p->qth_min >> p->Wlog;
805		opt.qth_max	= p->qth_max >> p->Wlog;
806		opt.Wlog	= p->Wlog;
807		opt.Plog	= p->Plog;
808		opt.Scell_log	= p->Scell_log;
809		opt.max_P	= p->max_P;
810	}
811	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
812	opt.flags	= q->flags;
813
814	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
815		goto nla_put_failure;
816
817	return skb->len;
818
819nla_put_failure:
820	nlmsg_trim(skb, b);
821	return -1;
822}
823
824static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
825{
826	return NULL;
827}
828
829static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
830{
831	return 0;
832}
833
834static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
835			      u32 classid)
836{
837	return 0;
838}
839
840static void sfq_unbind(struct Qdisc *q, unsigned long cl)
841{
842}
843
844static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
845				       struct netlink_ext_ack *extack)
846{
847	struct sfq_sched_data *q = qdisc_priv(sch);
848
849	if (cl)
850		return NULL;
851	return q->block;
852}
853
854static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
855			  struct sk_buff *skb, struct tcmsg *tcm)
856{
857	tcm->tcm_handle |= TC_H_MIN(cl);
858	return 0;
859}
860
861static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
862				struct gnet_dump *d)
863{
864	struct sfq_sched_data *q = qdisc_priv(sch);
865	sfq_index idx = q->ht[cl - 1];
866	struct gnet_stats_queue qs = { 0 };
867	struct tc_sfq_xstats xstats = { 0 };
868
869	if (idx != SFQ_EMPTY_SLOT) {
870		const struct sfq_slot *slot = &q->slots[idx];
871
872		xstats.allot = slot->allot;
873		qs.qlen = slot->qlen;
874		qs.backlog = slot->backlog;
875	}
876	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
877		return -1;
878	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
879}
880
881static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
882{
883	struct sfq_sched_data *q = qdisc_priv(sch);
884	unsigned int i;
885
886	if (arg->stop)
887		return;
888
889	for (i = 0; i < q->divisor; i++) {
890		if (q->ht[i] == SFQ_EMPTY_SLOT) {
891			arg->count++;
892			continue;
893		}
894		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
895			break;
896	}
897}
898
899static const struct Qdisc_class_ops sfq_class_ops = {
900	.leaf		=	sfq_leaf,
901	.find		=	sfq_find,
902	.tcf_block	=	sfq_tcf_block,
903	.bind_tcf	=	sfq_bind,
904	.unbind_tcf	=	sfq_unbind,
905	.dump		=	sfq_dump_class,
906	.dump_stats	=	sfq_dump_class_stats,
907	.walk		=	sfq_walk,
908};
909
910static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
911	.cl_ops		=	&sfq_class_ops,
912	.id		=	"sfq",
913	.priv_size	=	sizeof(struct sfq_sched_data),
914	.enqueue	=	sfq_enqueue,
915	.dequeue	=	sfq_dequeue,
916	.peek		=	qdisc_peek_dequeued,
917	.init		=	sfq_init,
918	.reset		=	sfq_reset,
919	.destroy	=	sfq_destroy,
920	.change		=	NULL,
921	.dump		=	sfq_dump,
922	.owner		=	THIS_MODULE,
923};
924MODULE_ALIAS_NET_SCH("sfq");
925
926static int __init sfq_module_init(void)
927{
928	return register_qdisc(&sfq_qdisc_ops);
929}
930static void __exit sfq_module_exit(void)
931{
932	unregister_qdisc(&sfq_qdisc_ops);
933}
934module_init(sfq_module_init)
935module_exit(sfq_module_exit)
936MODULE_LICENSE("GPL");
937MODULE_DESCRIPTION("Stochastic Fairness qdisc");