/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal classifier or an
 * external one).
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (round-robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
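
/* Illustrative note (not part of the original file): with the default
 * 1024 buckets, two given flows collide with probability 1/1024, and for
 * N concurrent flows the expected number of colliding pairs is roughly
 * N * (N - 1) / 2048, e.g. about 0.5 for N = 32. The per-qdisc hash
 * perturbation below makes such collisions random rather than targetable.
 */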

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto *filter_list;	/* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};
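
/* Illustrative footprint (assuming the default flows_cnt of 1024 and the
 * 64-byte flow target above): the flows table is 1024 * 64 B = 64 KB and
 * the backlogs table 1024 * 4 B = 4 KB, which is why both are allocated
 * with the kzalloc-then-vzalloc fallback further below.
 */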

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  const struct sk_buff *skb)
{
	struct flow_keys keys;
	unsigned int hash;

	skb_flow_dissect(skb, &keys);
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, q->perturbation);
	return ((u64)hash * q->flows_cnt) >> 32;
}
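
/* Illustrative note: the return statement maps a full 32-bit hash onto
 * [0, flows_cnt) without a modulo: since hash < 2^32,
 * ((u64)hash * flows_cnt) >> 32 == floor(hash * flows_cnt / 2^32).
 * For example, hash = 0x80000000 with flows_cnt = 1024 yields bucket 512.
 */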

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen/queued packets also return 0 */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
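
/* Illustrative note: a return value of 0 means "drop"; otherwise the value
 * is a 1-based flow index. For example (hypothetical handle and value), a
 * socket could pin its traffic to flow 7 of a qdisc with handle 8000: by
 * setting SO_PRIORITY to 0x80000007, since the priority check above matches
 * TC_H_MAJ against sch->handle and uses TC_H_MIN directly, bypassing the hash.
 */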

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
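
/* Illustrative usage: the two helpers form a singly linked FIFO built on
 * skb->next, so per-flow order is preserved:
 *
 *	flow_queue_add(flow, a);
 *	flow_queue_add(flow, b);
 *	skb = dequeue_head(flow);	// yields a; flow->head becomes b
 *
 * Note that dequeue_head() assumes flow->head is non-NULL; every caller
 * checks this first.
 */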

static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	kfree_skb(skb);
	sch->q.qlen--;
	sch->qstats.drops++;
	sch->qstats.backlog -= len;
	flow->dropped++;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
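
/* Illustrative note: on overlimit, fq_codel_drop() evicts from the fattest
 * flow, which is not necessarily the one just enqueued to. Only when the
 * victim is the arriving flow (same idx) does the caller get NET_XMIT_CN,
 * i.e. "your packet was dropped"; otherwise the enqueue succeeded at some
 * other flow's expense and the parent qlen is adjusted instead.
 */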

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}
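
/* Illustrative note: codel_dequeue() only receives &flow->cvars, so this
 * callback recovers the enclosing flow via container_of(), equivalent to
 *
 *	(struct fq_codel_flow *)((char *)vars -
 *				 offsetof(struct fq_codel_flow, cvars));
 *
 * and "flow - q->flows" is plain pointer arithmetic giving the flow index.
 */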

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
		q->cstats.drop_count = 0;
	}
	return skb;
}
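
/* Illustrative DRR walk (assuming quantum = 1514, a typical Ethernet
 * psched_mtu): a flow enters new_flows with deficit 1514; dequeuing a
 * 1000-byte packet leaves 514, still positive, so one more head packet may
 * be sent. Once the deficit drops to <= 0 the flow is rotated to old_flows
 * and topped up by one quantum before it is considered again, which is
 * what bounds each bucket's long-term bandwidth share.
 */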

static void fq_codel_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = fq_codel_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
	q->cstats.drop_count = 0;

	sch_tree_unlock(sch);
	return 0;
}
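
/* Illustrative conversion (assuming CODEL_SHIFT == 10, as in net/codel.h):
 * user space passes TCA_FQ_CODEL_TARGET/INTERVAL in microseconds, while
 * codel time is kept in units of nanoseconds >> 10 (about 1.024 us each).
 * A 5000 us target thus becomes (5000 * 1000) >> 10 = 4882 internal units.
 */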

static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}
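
/* Illustrative usage: this pair hides the kmalloc/vmalloc split from
 * callers. __GFP_NOWARN keeps a failed (large, physically contiguous)
 * kzalloc() silent before falling back to vzalloc():
 *
 *	u32 *tbl = fq_codel_zalloc(n * sizeof(u32));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	fq_codel_free(tbl);	(correct for either allocation origin)
 */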

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);
		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
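
/* Illustrative defaults (as initialized above, i.e. what an argument-less
 * "tc qdisc add ... fq_codel" would get): limit 10240 packets, 1024 flows,
 * quantum = psched_mtu() of the device, ECN enabled; target and interval
 * come from codel_params_init() (5 ms and 100 ms upstream).
 */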

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};
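
/* Illustrative usage once the module is loaded (hypothetical device name):
 *
 *	# modprobe sch_fq_codel
 *	# tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 ecn
 *
 * tc resolves the "fq_codel" id string to this Qdisc_ops table through
 * register_qdisc() below.
 */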

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");