v6.13.7 (net/sched/sch_fq_codel.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Fair Queue CoDel discipline
  4 *
  5 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/types.h>
 10#include <linux/kernel.h>
 11#include <linux/jiffies.h>
 12#include <linux/string.h>
 13#include <linux/in.h>
 14#include <linux/errno.h>
 15#include <linux/init.h>
 16#include <linux/skbuff.h>
 17#include <linux/slab.h>
 18#include <linux/vmalloc.h>
 19#include <net/netlink.h>
 20#include <net/pkt_sched.h>
 21#include <net/pkt_cls.h>
 22#include <net/codel.h>
 23#include <net/codel_impl.h>
 24#include <net/codel_qdisc.h>
 25
 26/*	Fair Queue CoDel.
 27 *
 28 * Principles:
 29 * Packets are classified (internal classifier or external) on flows.
 30 * This is a Stochastic model (as we use a hash, several flows
 31 *			       might be hashed on same slot)
 32 * Each flow has a CoDel managed queue.
 33 * Flows are linked onto two (Round Robin) lists,
 34 * so that new flows have priority over old ones.
 35 *
 36 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 37 * head drops only.
 38 * ECN capability is on by default.
 39 * Low memory footprint (64 bytes per flow)
 40 */
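/* Illustrative configuration (device name and values below are placeholders
 * only, not mandated by this file):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms quantum 1514 ecn
 *
 * Each option corresponds to one of the TCA_FQ_CODEL_* netlink attributes
 * parsed in fq_codel_change() below.
 */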
 41
 42struct fq_codel_flow {
 43	struct sk_buff	  *head;
 44	struct sk_buff	  *tail;
 45	struct list_head  flowchain;
 46	int		  deficit;
 47	struct codel_vars cvars;
 48}; /* please try to keep this structure <= 64 bytes */
 49
 50struct fq_codel_sched_data {
 51	struct tcf_proto __rcu *filter_list; /* optional external classifier */
 52	struct tcf_block *block;
 53	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
 54	u32		*backlogs;	/* backlog table [flows_cnt] */
 55	u32		flows_cnt;	/* number of flows */
 56	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
 57	u32		drop_batch_size;
 58	u32		memory_limit;
 59	struct codel_params cparams;
 60	struct codel_stats cstats;
 61	u32		memory_usage;
 62	u32		drop_overmemory;
 63	u32		drop_overlimit;
 64	u32		new_flow_count;
 65
 66	struct list_head new_flows;	/* list of new flows */
 67	struct list_head old_flows;	/* list of old flows */
 68};
 69
 70static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
 71				  struct sk_buff *skb)
 72{
 73	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
 74}
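/* reciprocal_scale(hash, n) maps the 32-bit flow hash uniformly onto
 * [0, n) by computing (u64)hash * n >> 32, avoiding a modulo operation
 * on the fast path.
 */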
 75
 76static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
 77				      int *qerr)
 78{
 79	struct fq_codel_sched_data *q = qdisc_priv(sch);
 80	struct tcf_proto *filter;
 81	struct tcf_result res;
 82	int result;
 83
 84	if (TC_H_MAJ(skb->priority) == sch->handle &&
 85	    TC_H_MIN(skb->priority) > 0 &&
 86	    TC_H_MIN(skb->priority) <= q->flows_cnt)
 87		return TC_H_MIN(skb->priority);
 88
 89	filter = rcu_dereference_bh(q->filter_list);
 90	if (!filter)
 91		return fq_codel_hash(q, skb) + 1;
 92
 93	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 94	result = tcf_classify(skb, NULL, filter, &res, false);
 95	if (result >= 0) {
 96#ifdef CONFIG_NET_CLS_ACT
 97		switch (result) {
 98		case TC_ACT_STOLEN:
 99		case TC_ACT_QUEUED:
100		case TC_ACT_TRAP:
101			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
102			fallthrough;
103		case TC_ACT_SHOT:
104			return 0;
105		}
106#endif
107		if (TC_H_MIN(res.classid) <= q->flows_cnt)
108			return TC_H_MIN(res.classid);
109	}
110	return 0;
111}
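/* Return convention: 0 means "no flow" (the packet will be dropped by the
 * caller), while 1..flows_cnt selects a flow; fq_codel_enqueue() subtracts
 * one to get the index into q->flows[].
 */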
112
113/* helper functions: might be changed when/if skb uses a standard list_head */
114
115/* remove one skb from head of slot queue */
116static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
117{
118	struct sk_buff *skb = flow->head;
119
120	flow->head = skb->next;
121	skb_mark_not_on_list(skb);
122	return skb;
123}
124
125/* add skb to flow queue (tail add) */
126static inline void flow_queue_add(struct fq_codel_flow *flow,
127				  struct sk_buff *skb)
128{
129	if (flow->head == NULL)
130		flow->head = skb;
131	else
132		flow->tail->next = skb;
133	flow->tail = skb;
134	skb->next = NULL;
135}
136
137static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
138				  struct sk_buff **to_free)
139{
140	struct fq_codel_sched_data *q = qdisc_priv(sch);
141	struct sk_buff *skb;
142	unsigned int maxbacklog = 0, idx = 0, i, len;
143	struct fq_codel_flow *flow;
144	unsigned int threshold;
145	unsigned int mem = 0;
146
147	/* Queue is full! Find the fat flow and drop packet(s) from it.
148	 * This might sound expensive, but with 1024 flows, we scan
 149 * 4KB of memory, and we don't need to handle a complex tree
150	 * in fast path (packet queue/enqueue) with many cache misses.
151	 * In stress mode, we'll try to drop 64 packets from the flow,
152	 * amortizing this linear lookup to one cache line per drop.
153	 */
154	for (i = 0; i < q->flows_cnt; i++) {
155		if (q->backlogs[i] > maxbacklog) {
156			maxbacklog = q->backlogs[i];
157			idx = i;
158		}
159	}
160
161	/* Our goal is to drop half of this fat flow backlog */
162	threshold = maxbacklog >> 1;
163
164	flow = &q->flows[idx];
165	len = 0;
166	i = 0;
167	do {
168		skb = dequeue_head(flow);
169		len += qdisc_pkt_len(skb);
170		mem += get_codel_cb(skb)->mem_usage;
171		__qdisc_drop(skb, to_free);
172	} while (++i < max_packets && len < threshold);
173
174	/* Tell codel to increase its signal strength also */
175	flow->cvars.count += i;
176	q->backlogs[idx] -= len;
177	q->memory_usage -= mem;
178	sch->qstats.drops += i;
179	sch->qstats.backlog -= len;
180	sch->q.qlen -= i;
181	return idx;
182}
183
184static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
185			    struct sk_buff **to_free)
186{
187	struct fq_codel_sched_data *q = qdisc_priv(sch);
188	unsigned int idx, prev_backlog, prev_qlen;
189	struct fq_codel_flow *flow;
190	int ret;
191	unsigned int pkt_len;
192	bool memory_limited;
193
194	idx = fq_codel_classify(skb, sch, &ret);
195	if (idx == 0) {
196		if (ret & __NET_XMIT_BYPASS)
197			qdisc_qstats_drop(sch);
198		__qdisc_drop(skb, to_free);
199		return ret;
200	}
201	idx--;
202
203	codel_set_enqueue_time(skb);
204	flow = &q->flows[idx];
205	flow_queue_add(flow, skb);
206	q->backlogs[idx] += qdisc_pkt_len(skb);
207	qdisc_qstats_backlog_inc(sch, skb);
208
209	if (list_empty(&flow->flowchain)) {
210		list_add_tail(&flow->flowchain, &q->new_flows);
211		q->new_flow_count++;
212		flow->deficit = q->quantum;
213	}
214	get_codel_cb(skb)->mem_usage = skb->truesize;
215	q->memory_usage += get_codel_cb(skb)->mem_usage;
216	memory_limited = q->memory_usage > q->memory_limit;
217	if (++sch->q.qlen <= sch->limit && !memory_limited)
218		return NET_XMIT_SUCCESS;
219
220	prev_backlog = sch->qstats.backlog;
221	prev_qlen = sch->q.qlen;
222
223	/* save this packet length as it might be dropped by fq_codel_drop() */
224	pkt_len = qdisc_pkt_len(skb);
225	/* fq_codel_drop() is quite expensive, as it performs a linear search
226	 * in q->backlogs[] to find a fat flow.
227	 * So instead of dropping a single packet, drop half of its backlog
 228	 * with a limit of 64 packets (drop_batch_size) to avoid too big a CPU spike here.
229	 */
230	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
231
232	prev_qlen -= sch->q.qlen;
233	prev_backlog -= sch->qstats.backlog;
234	q->drop_overlimit += prev_qlen;
235	if (memory_limited)
236		q->drop_overmemory += prev_qlen;
237
238	/* As we dropped packet(s), better let upper stack know this.
239	 * If we dropped a packet for this flow, return NET_XMIT_CN,
 240	 * but in this case, our parents won't increase their backlogs.
241	 */
242	if (ret == idx) {
243		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
244					  prev_backlog - pkt_len);
245		return NET_XMIT_CN;
246	}
247	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
248	return NET_XMIT_SUCCESS;
249}
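/* When the batch drop above hit the very flow we just enqueued to (ret == idx),
 * NET_XMIT_CN is returned and parent qdiscs will not account the new packet,
 * which is why one packet and pkt_len are excluded from the
 * qdisc_tree_reduce_backlog() call in that branch.
 */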
250
251/* This is the specific function called from codel_dequeue()
 252 * to dequeue a packet from the queue. Note: backlog is handled in
 253 * codel; we don't need to reduce it here.
254 */
255static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
256{
257	struct Qdisc *sch = ctx;
258	struct fq_codel_sched_data *q = qdisc_priv(sch);
259	struct fq_codel_flow *flow;
260	struct sk_buff *skb = NULL;
261
262	flow = container_of(vars, struct fq_codel_flow, cvars);
263	if (flow->head) {
264		skb = dequeue_head(flow);
265		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
266		q->memory_usage -= get_codel_cb(skb)->mem_usage;
267		sch->q.qlen--;
268		sch->qstats.backlog -= qdisc_pkt_len(skb);
269	}
270	return skb;
271}
272
273static void drop_func(struct sk_buff *skb, void *ctx)
274{
275	struct Qdisc *sch = ctx;
276
277	kfree_skb(skb);
278	qdisc_qstats_drop(sch);
279}
280
281static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
282{
283	struct fq_codel_sched_data *q = qdisc_priv(sch);
284	struct sk_buff *skb;
285	struct fq_codel_flow *flow;
286	struct list_head *head;
287
288begin:
289	head = &q->new_flows;
290	if (list_empty(head)) {
291		head = &q->old_flows;
292		if (list_empty(head))
293			return NULL;
294	}
295	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
296
297	if (flow->deficit <= 0) {
298		flow->deficit += q->quantum;
299		list_move_tail(&flow->flowchain, &q->old_flows);
300		goto begin;
301	}
302
303	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
304			    &flow->cvars, &q->cstats, qdisc_pkt_len,
305			    codel_get_enqueue_time, drop_func, dequeue_func);
306
307	if (!skb) {
308		/* force a pass through old_flows to prevent starvation */
309		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
310			list_move_tail(&flow->flowchain, &q->old_flows);
311		else
312			list_del_init(&flow->flowchain);
313		goto begin;
314	}
315	qdisc_bstats_update(sch, skb);
316	flow->deficit -= qdisc_pkt_len(skb);
 317	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
318	 * or HTB crashes. Defer it for next round.
319	 */
320	if (q->cstats.drop_count && sch->q.qlen) {
321		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
322					  q->cstats.drop_len);
323		q->cstats.drop_count = 0;
324		q->cstats.drop_len = 0;
325	}
326	return skb;
327}
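/* Scheduling summary: the loop above is a deficit round robin over the two
 * lists. new_flows is always served first; a flow whose deficit is exhausted
 * gets q->quantum added and is rotated onto old_flows, and a flow found empty
 * is either parked on old_flows (if it came from new_flows while old_flows is
 * non-empty) or removed from the rotation entirely.
 */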
328
329static void fq_codel_flow_purge(struct fq_codel_flow *flow)
330{
331	rtnl_kfree_skbs(flow->head, flow->tail);
332	flow->head = NULL;
333}
334
335static void fq_codel_reset(struct Qdisc *sch)
336{
337	struct fq_codel_sched_data *q = qdisc_priv(sch);
338	int i;
339
340	INIT_LIST_HEAD(&q->new_flows);
341	INIT_LIST_HEAD(&q->old_flows);
342	for (i = 0; i < q->flows_cnt; i++) {
343		struct fq_codel_flow *flow = q->flows + i;
344
345		fq_codel_flow_purge(flow);
346		INIT_LIST_HEAD(&flow->flowchain);
347		codel_vars_init(&flow->cvars);
348	}
349	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
350	q->memory_usage = 0;
351}
352
353static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
354	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
355	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
356	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
357	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
358	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
359	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
360	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
361	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
362	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
363	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
364	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
365};
366
367static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
368			   struct netlink_ext_ack *extack)
369{
370	struct fq_codel_sched_data *q = qdisc_priv(sch);
371	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
372	u32 quantum = 0;
373	int err;
374
375	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
376					  fq_codel_policy, NULL);
377	if (err < 0)
378		return err;
379	if (tb[TCA_FQ_CODEL_FLOWS]) {
380		if (q->flows)
381			return -EINVAL;
382		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
383		if (!q->flows_cnt ||
384		    q->flows_cnt > 65536)
385			return -EINVAL;
386	}
387	if (tb[TCA_FQ_CODEL_QUANTUM]) {
388		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
389		if (quantum > FQ_CODEL_QUANTUM_MAX) {
390			NL_SET_ERR_MSG(extack, "Invalid quantum");
391			return -EINVAL;
392		}
393	}
394	sch_tree_lock(sch);
395
396	if (tb[TCA_FQ_CODEL_TARGET]) {
397		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
398
399		WRITE_ONCE(q->cparams.target,
400			   (target * NSEC_PER_USEC) >> CODEL_SHIFT);
401	}
402
403	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
404		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
405
406		WRITE_ONCE(q->cparams.ce_threshold,
407			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
408	}
409
410	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
411		WRITE_ONCE(q->cparams.ce_threshold_selector,
412			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]));
413	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
414		WRITE_ONCE(q->cparams.ce_threshold_mask,
415			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]));
416
417	if (tb[TCA_FQ_CODEL_INTERVAL]) {
418		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
419
420		WRITE_ONCE(q->cparams.interval,
421			   (interval * NSEC_PER_USEC) >> CODEL_SHIFT);
422	}
423
424	if (tb[TCA_FQ_CODEL_LIMIT])
425		WRITE_ONCE(sch->limit,
426			   nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]));
427
428	if (tb[TCA_FQ_CODEL_ECN])
429		WRITE_ONCE(q->cparams.ecn,
430			   !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]));
431
432	if (quantum)
433		WRITE_ONCE(q->quantum, quantum);
434
435	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
436		WRITE_ONCE(q->drop_batch_size,
437			   max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])));
438
439	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
440		WRITE_ONCE(q->memory_limit,
441			   min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])));
442
443	while (sch->q.qlen > sch->limit ||
444	       q->memory_usage > q->memory_limit) {
445		struct sk_buff *skb = fq_codel_dequeue(sch);
446
447		q->cstats.drop_len += qdisc_pkt_len(skb);
448		rtnl_kfree_skbs(skb, skb);
449		q->cstats.drop_count++;
450	}
451	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
452	q->cstats.drop_count = 0;
453	q->cstats.drop_len = 0;
454
455	sch_tree_unlock(sch);
456	return 0;
457}
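/* Units: TCA_FQ_CODEL_TARGET, _INTERVAL and _CE_THRESHOLD are supplied by
 * user space in microseconds and stored as codel_time_t, i.e. nanoseconds
 * right-shifted by CODEL_SHIFT (see include/net/codel.h), which is what the
 * (usec * NSEC_PER_USEC) >> CODEL_SHIFT conversions above implement.
 */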
458
459static void fq_codel_destroy(struct Qdisc *sch)
460{
461	struct fq_codel_sched_data *q = qdisc_priv(sch);
462
463	tcf_block_put(q->block);
464	kvfree(q->backlogs);
465	kvfree(q->flows);
466}
467
468static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
469			 struct netlink_ext_ack *extack)
470{
471	struct fq_codel_sched_data *q = qdisc_priv(sch);
472	int i;
473	int err;
474
475	sch->limit = 10*1024;
476	q->flows_cnt = 1024;
477	q->memory_limit = 32 << 20; /* 32 MBytes */
478	q->drop_batch_size = 64;
479	q->quantum = psched_mtu(qdisc_dev(sch));
480	INIT_LIST_HEAD(&q->new_flows);
481	INIT_LIST_HEAD(&q->old_flows);
482	codel_params_init(&q->cparams);
483	codel_stats_init(&q->cstats);
484	q->cparams.ecn = true;
485	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
486
487	if (opt) {
488		err = fq_codel_change(sch, opt, extack);
489		if (err)
490			goto init_failure;
491	}
492
493	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
494	if (err)
495		goto init_failure;
496
497	if (!q->flows) {
498		q->flows = kvcalloc(q->flows_cnt,
499				    sizeof(struct fq_codel_flow),
500				    GFP_KERNEL);
501		if (!q->flows) {
502			err = -ENOMEM;
503			goto init_failure;
504		}
505		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
506		if (!q->backlogs) {
507			err = -ENOMEM;
508			goto alloc_failure;
509		}
510		for (i = 0; i < q->flows_cnt; i++) {
511			struct fq_codel_flow *flow = q->flows + i;
512
513			INIT_LIST_HEAD(&flow->flowchain);
514			codel_vars_init(&flow->cvars);
515		}
516	}
517	if (sch->limit >= 1)
518		sch->flags |= TCQ_F_CAN_BYPASS;
519	else
520		sch->flags &= ~TCQ_F_CAN_BYPASS;
521	return 0;
522
523alloc_failure:
524	kvfree(q->flows);
525	q->flows = NULL;
526init_failure:
527	q->flows_cnt = 0;
528	return err;
529}
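/* Note on the allocations above: kvcalloc() tries kmalloc() first and falls
 * back to vmalloc() for large requests, which matters because flows_cnt can
 * be configured up to 65536 and the flows[] table then spans a few megabytes.
 */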
530
531static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
532{
533	struct fq_codel_sched_data *q = qdisc_priv(sch);
534	codel_time_t ce_threshold;
535	struct nlattr *opts;
536
537	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
538	if (opts == NULL)
539		goto nla_put_failure;
540
541	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
542			codel_time_to_us(READ_ONCE(q->cparams.target))) ||
543	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
544			READ_ONCE(sch->limit)) ||
545	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
546			codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
547	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
548			READ_ONCE(q->cparams.ecn)) ||
549	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
550			READ_ONCE(q->quantum)) ||
551	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
552			READ_ONCE(q->drop_batch_size)) ||
553	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
554			READ_ONCE(q->memory_limit)) ||
555	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
556			READ_ONCE(q->flows_cnt)))
557		goto nla_put_failure;
558
559	ce_threshold = READ_ONCE(q->cparams.ce_threshold);
560	if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
561		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
562				codel_time_to_us(ce_threshold)))
563			goto nla_put_failure;
564		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
565			       READ_ONCE(q->cparams.ce_threshold_selector)))
566			goto nla_put_failure;
567		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
568			       READ_ONCE(q->cparams.ce_threshold_mask)))
569			goto nla_put_failure;
570	}
571
572	return nla_nest_end(skb, opts);
573
574nla_put_failure:
575	return -1;
576}
577
578static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
579{
580	struct fq_codel_sched_data *q = qdisc_priv(sch);
581	struct tc_fq_codel_xstats st = {
582		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
583	};
584	struct list_head *pos;
585
586	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
587	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
588	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
589	st.qdisc_stats.new_flow_count = q->new_flow_count;
590	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
591	st.qdisc_stats.memory_usage  = q->memory_usage;
592	st.qdisc_stats.drop_overmemory = q->drop_overmemory;
593
594	sch_tree_lock(sch);
595	list_for_each(pos, &q->new_flows)
596		st.qdisc_stats.new_flows_len++;
597
598	list_for_each(pos, &q->old_flows)
599		st.qdisc_stats.old_flows_len++;
600	sch_tree_unlock(sch);
601
602	return gnet_stats_copy_app(d, &st, sizeof(st));
603}
604
605static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
606{
607	return NULL;
608}
609
610static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
611{
612	return 0;
613}
614
615static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
616			      u32 classid)
617{
618	return 0;
619}
620
621static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
622{
623}
624
625static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
626					    struct netlink_ext_ack *extack)
627{
628	struct fq_codel_sched_data *q = qdisc_priv(sch);
629
630	if (cl)
631		return NULL;
632	return q->block;
633}
634
635static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
636			  struct sk_buff *skb, struct tcmsg *tcm)
637{
638	tcm->tcm_handle |= TC_H_MIN(cl);
639	return 0;
640}
641
642static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
643				     struct gnet_dump *d)
644{
645	struct fq_codel_sched_data *q = qdisc_priv(sch);
646	u32 idx = cl - 1;
647	struct gnet_stats_queue qs = { 0 };
648	struct tc_fq_codel_xstats xstats;
649
650	if (idx < q->flows_cnt) {
651		const struct fq_codel_flow *flow = &q->flows[idx];
652		const struct sk_buff *skb;
653
654		memset(&xstats, 0, sizeof(xstats));
655		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
656		xstats.class_stats.deficit = flow->deficit;
657		xstats.class_stats.ldelay =
658			codel_time_to_us(flow->cvars.ldelay);
659		xstats.class_stats.count = flow->cvars.count;
660		xstats.class_stats.lastcount = flow->cvars.lastcount;
661		xstats.class_stats.dropping = flow->cvars.dropping;
662		if (flow->cvars.dropping) {
663			codel_tdiff_t delta = flow->cvars.drop_next -
664					      codel_get_time();
665
666			xstats.class_stats.drop_next = (delta >= 0) ?
667				codel_time_to_us(delta) :
668				-codel_time_to_us(-delta);
669		}
670		if (flow->head) {
671			sch_tree_lock(sch);
672			skb = flow->head;
673			while (skb) {
674				qs.qlen++;
675				skb = skb->next;
676			}
677			sch_tree_unlock(sch);
678		}
679		qs.backlog = q->backlogs[idx];
680		qs.drops = 0;
681	}
682	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
683		return -1;
684	if (idx < q->flows_cnt)
685		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
686	return 0;
687}
688
689static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
690{
691	struct fq_codel_sched_data *q = qdisc_priv(sch);
692	unsigned int i;
693
694	if (arg->stop)
695		return;
696
697	for (i = 0; i < q->flows_cnt; i++) {
698		if (list_empty(&q->flows[i].flowchain)) {
699			arg->count++;
700			continue;
701		}
702		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
703			break;
704	}
705}
706
707static const struct Qdisc_class_ops fq_codel_class_ops = {
708	.leaf		=	fq_codel_leaf,
709	.find		=	fq_codel_find,
710	.tcf_block	=	fq_codel_tcf_block,
711	.bind_tcf	=	fq_codel_bind,
712	.unbind_tcf	=	fq_codel_unbind,
713	.dump		=	fq_codel_dump_class,
714	.dump_stats	=	fq_codel_dump_class_stats,
715	.walk		=	fq_codel_walk,
716};
717
718static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
719	.cl_ops		=	&fq_codel_class_ops,
720	.id		=	"fq_codel",
721	.priv_size	=	sizeof(struct fq_codel_sched_data),
722	.enqueue	=	fq_codel_enqueue,
723	.dequeue	=	fq_codel_dequeue,
724	.peek		=	qdisc_peek_dequeued,
725	.init		=	fq_codel_init,
726	.reset		=	fq_codel_reset,
727	.destroy	=	fq_codel_destroy,
728	.change		=	fq_codel_change,
729	.dump		=	fq_codel_dump,
730	.dump_stats =	fq_codel_dump_stats,
731	.owner		=	THIS_MODULE,
732};
733MODULE_ALIAS_NET_SCH("fq_codel");
734
735static int __init fq_codel_module_init(void)
736{
737	return register_qdisc(&fq_codel_qdisc_ops);
738}
739
740static void __exit fq_codel_module_exit(void)
741{
742	unregister_qdisc(&fq_codel_qdisc_ops);
743}
744
745module_init(fq_codel_module_init)
746module_exit(fq_codel_module_exit)
747MODULE_AUTHOR("Eric Dumazet");
748MODULE_LICENSE("GPL");
749MODULE_DESCRIPTION("Fair Queue CoDel discipline");
v4.6 (net/sched/sch_fq_codel.c, earlier version of the same file)
 
  1/*
  2 * Fair Queue CoDel discipline
  3 *
  4 *	This program is free software; you can redistribute it and/or
  5 *	modify it under the terms of the GNU General Public License
  6 *	as published by the Free Software Foundation; either version
  7 *	2 of the License, or (at your option) any later version.
  8 *
  9 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 10 */
 11
 12#include <linux/module.h>
 13#include <linux/types.h>
 14#include <linux/kernel.h>
 15#include <linux/jiffies.h>
 16#include <linux/string.h>
 17#include <linux/in.h>
 18#include <linux/errno.h>
 19#include <linux/init.h>
 20#include <linux/skbuff.h>
 21#include <linux/jhash.h>
 22#include <linux/slab.h>
 23#include <linux/vmalloc.h>
 24#include <net/netlink.h>
 25#include <net/pkt_sched.h>
 26#include <net/codel.h>
 27
 28/*	Fair Queue CoDel.
 29 *
 30 * Principles:
 31 * Packets are classified (internal classifier or external) on flows.
 32 * This is a Stochastic model (as we use a hash, several flows
 33 *			       might be hashed on same slot)
 34 * Each flow has a CoDel managed queue.
 35 * Flows are linked onto two (Round Robin) lists,
 36 * so that new flows have priority over old ones.
 37 *
 38 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 39 * head drops only.
 40 * ECN capability is on by default.
 41 * Low memory footprint (64 bytes per flow)
 42 */
 43
 44struct fq_codel_flow {
 45	struct sk_buff	  *head;
 46	struct sk_buff	  *tail;
 47	struct list_head  flowchain;
 48	int		  deficit;
 49	u32		  dropped; /* number of drops (or ECN marks) on this flow */
 50	struct codel_vars cvars;
 51}; /* please try to keep this structure <= 64 bytes */
 52
 53struct fq_codel_sched_data {
 54	struct tcf_proto __rcu *filter_list; /* optional external classifier */
 55	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
 56	u32		*backlogs;	/* backlog table [flows_cnt] */
 57	u32		flows_cnt;	/* number of flows */
 58	u32		perturbation;	/* hash perturbation */
 59	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
 60	struct codel_params cparams;
 61	struct codel_stats cstats;
 62	u32		drop_overlimit;
 63	u32		new_flow_count;
 64
 65	struct list_head new_flows;	/* list of new flows */
 66	struct list_head old_flows;	/* list of old flows */
 67};
 68
 69static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
 70				  struct sk_buff *skb)
 71{
 72	u32 hash = skb_get_hash_perturb(skb, q->perturbation);
 73
 74	return reciprocal_scale(hash, q->flows_cnt);
 75}
 76
 77static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
 78				      int *qerr)
 79{
 80	struct fq_codel_sched_data *q = qdisc_priv(sch);
 81	struct tcf_proto *filter;
 82	struct tcf_result res;
 83	int result;
 84
 85	if (TC_H_MAJ(skb->priority) == sch->handle &&
 86	    TC_H_MIN(skb->priority) > 0 &&
 87	    TC_H_MIN(skb->priority) <= q->flows_cnt)
 88		return TC_H_MIN(skb->priority);
 89
 90	filter = rcu_dereference_bh(q->filter_list);
 91	if (!filter)
 92		return fq_codel_hash(q, skb) + 1;
 93
 94	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 95	result = tc_classify(skb, filter, &res, false);
 96	if (result >= 0) {
 97#ifdef CONFIG_NET_CLS_ACT
 98		switch (result) {
 99		case TC_ACT_STOLEN:
100		case TC_ACT_QUEUED:
101			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
102		case TC_ACT_SHOT:
103			return 0;
104		}
105#endif
106		if (TC_H_MIN(res.classid) <= q->flows_cnt)
107			return TC_H_MIN(res.classid);
108	}
109	return 0;
110}
111
112/* helper functions: might be changed when/if skb uses a standard list_head */
113
114/* remove one skb from head of slot queue */
115static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
116{
117	struct sk_buff *skb = flow->head;
118
119	flow->head = skb->next;
120	skb->next = NULL;
121	return skb;
122}
123
124/* add skb to flow queue (tail add) */
125static inline void flow_queue_add(struct fq_codel_flow *flow,
126				  struct sk_buff *skb)
127{
128	if (flow->head == NULL)
129		flow->head = skb;
130	else
131		flow->tail->next = skb;
132	flow->tail = skb;
133	skb->next = NULL;
134}
135
136static unsigned int fq_codel_drop(struct Qdisc *sch)
137{
138	struct fq_codel_sched_data *q = qdisc_priv(sch);
139	struct sk_buff *skb;
140	unsigned int maxbacklog = 0, idx = 0, i, len;
141	struct fq_codel_flow *flow;
142
143	/* Queue is full! Find the fat flow and drop packet from it.
144	 * This might sound expensive, but with 1024 flows, we scan
 145 * 4KB of memory, and we don't need to handle a complex tree
146	 * in fast path (packet queue/enqueue) with many cache misses.
147	 */
148	for (i = 0; i < q->flows_cnt; i++) {
149		if (q->backlogs[i] > maxbacklog) {
150			maxbacklog = q->backlogs[i];
151			idx = i;
152		}
153	}
154	flow = &q->flows[idx];
155	skb = dequeue_head(flow);
156	len = qdisc_pkt_len(skb);
157	q->backlogs[idx] -= len;
158	sch->q.qlen--;
159	qdisc_qstats_drop(sch);
160	qdisc_qstats_backlog_dec(sch, skb);
161	kfree_skb(skb);
162	flow->dropped++;
163	return idx;
164}
165
166static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
167{
168	unsigned int prev_backlog;
169
170	prev_backlog = sch->qstats.backlog;
171	fq_codel_drop(sch);
172	return prev_backlog - sch->qstats.backlog;
173}
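/* fq_codel_qdisc_drop() implements the ->drop operation of struct Qdisc_ops,
 * which still existed in v4.6; that callback was removed in later kernels,
 * which is why the v6.13.7 version above has no equivalent helper.
 */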
174
175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
176{
177	struct fq_codel_sched_data *q = qdisc_priv(sch);
178	unsigned int idx, prev_backlog;
179	struct fq_codel_flow *flow;
180	int uninitialized_var(ret);
181
182	idx = fq_codel_classify(skb, sch, &ret);
183	if (idx == 0) {
184		if (ret & __NET_XMIT_BYPASS)
185			qdisc_qstats_drop(sch);
186		kfree_skb(skb);
187		return ret;
188	}
189	idx--;
190
191	codel_set_enqueue_time(skb);
192	flow = &q->flows[idx];
193	flow_queue_add(flow, skb);
194	q->backlogs[idx] += qdisc_pkt_len(skb);
195	qdisc_qstats_backlog_inc(sch, skb);
196
197	if (list_empty(&flow->flowchain)) {
198		list_add_tail(&flow->flowchain, &q->new_flows);
199		q->new_flow_count++;
200		flow->deficit = q->quantum;
201		flow->dropped = 0;
202	}
203	if (++sch->q.qlen <= sch->limit)
204		return NET_XMIT_SUCCESS;
205
206	prev_backlog = sch->qstats.backlog;
207	q->drop_overlimit++;
208	/* Return Congestion Notification only if we dropped a packet
209	 * from this flow.
210	 */
211	if (fq_codel_drop(sch) == idx)
212		return NET_XMIT_CN;
213
214	/* As we dropped a packet, better let upper stack know this */
215	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
216	return NET_XMIT_SUCCESS;
217}
218
219/* This is the specific function called from codel_dequeue()
 220 * to dequeue a packet from the queue. Note: backlog is handled in
 221 * codel; we don't need to reduce it here.
222 */
223static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
224{
225	struct fq_codel_sched_data *q = qdisc_priv(sch);
226	struct fq_codel_flow *flow;
227	struct sk_buff *skb = NULL;
228
229	flow = container_of(vars, struct fq_codel_flow, cvars);
230	if (flow->head) {
231		skb = dequeue_head(flow);
232		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
233		sch->q.qlen--;
234	}
235	return skb;
236}
237
238static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
239{
240	struct fq_codel_sched_data *q = qdisc_priv(sch);
241	struct sk_buff *skb;
242	struct fq_codel_flow *flow;
243	struct list_head *head;
244	u32 prev_drop_count, prev_ecn_mark;
245	unsigned int prev_backlog;
246
247begin:
248	head = &q->new_flows;
249	if (list_empty(head)) {
250		head = &q->old_flows;
251		if (list_empty(head))
252			return NULL;
253	}
254	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
255
256	if (flow->deficit <= 0) {
257		flow->deficit += q->quantum;
258		list_move_tail(&flow->flowchain, &q->old_flows);
259		goto begin;
260	}
261
262	prev_drop_count = q->cstats.drop_count;
263	prev_ecn_mark = q->cstats.ecn_mark;
264	prev_backlog = sch->qstats.backlog;
265
266	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
267			    dequeue);
268
269	flow->dropped += q->cstats.drop_count - prev_drop_count;
270	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
271
272	if (!skb) {
273		/* force a pass through old_flows to prevent starvation */
274		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
275			list_move_tail(&flow->flowchain, &q->old_flows);
276		else
277			list_del_init(&flow->flowchain);
278		goto begin;
279	}
280	qdisc_bstats_update(sch, skb);
281	flow->deficit -= qdisc_pkt_len(skb);
 282	 * We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
283	 * or HTB crashes. Defer it for next round.
284	 */
285	if (q->cstats.drop_count && sch->q.qlen) {
286		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
287					  q->cstats.drop_len);
288		q->cstats.drop_count = 0;
289		q->cstats.drop_len = 0;
290	}
291	return skb;
292}
293
294static void fq_codel_reset(struct Qdisc *sch)
295{
296	struct fq_codel_sched_data *q = qdisc_priv(sch);
297	int i;
298
299	INIT_LIST_HEAD(&q->new_flows);
300	INIT_LIST_HEAD(&q->old_flows);
301	for (i = 0; i < q->flows_cnt; i++) {
302		struct fq_codel_flow *flow = q->flows + i;
303
304		while (flow->head) {
305			struct sk_buff *skb = dequeue_head(flow);
306
307			qdisc_qstats_backlog_dec(sch, skb);
308			kfree_skb(skb);
309		}
310
311		INIT_LIST_HEAD(&flow->flowchain);
312		codel_vars_init(&flow->cvars);
313	}
314	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
315	sch->q.qlen = 0;
316}
317
318static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
319	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
320	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
321	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
322	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
323	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
324	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
325	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
326};
327
328static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
329{
330	struct fq_codel_sched_data *q = qdisc_priv(sch);
331	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
332	int err;
333
334	if (!opt)
335		return -EINVAL;
336
337	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
338	if (err < 0)
339		return err;
340	if (tb[TCA_FQ_CODEL_FLOWS]) {
341		if (q->flows)
342			return -EINVAL;
343		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
344		if (!q->flows_cnt ||
345		    q->flows_cnt > 65536)
346			return -EINVAL;
347	}
348	sch_tree_lock(sch);
349
350	if (tb[TCA_FQ_CODEL_TARGET]) {
351		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
352
353		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
354	}
355
356	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
357		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
358
359		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
360	}
361
362	if (tb[TCA_FQ_CODEL_INTERVAL]) {
363		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
364
365		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
366	}
367
368	if (tb[TCA_FQ_CODEL_LIMIT])
369		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
370
371	if (tb[TCA_FQ_CODEL_ECN])
372		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
373
374	if (tb[TCA_FQ_CODEL_QUANTUM])
375		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
376
377	while (sch->q.qlen > sch->limit) {
378		struct sk_buff *skb = fq_codel_dequeue(sch);
379
380		q->cstats.drop_len += qdisc_pkt_len(skb);
381		kfree_skb(skb);
382		q->cstats.drop_count++;
383	}
384	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
385	q->cstats.drop_count = 0;
386	q->cstats.drop_len = 0;
387
388	sch_tree_unlock(sch);
389	return 0;
390}
391
392static void *fq_codel_zalloc(size_t sz)
393{
394	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
395
396	if (!ptr)
397		ptr = vzalloc(sz);
398	return ptr;
399}
400
401static void fq_codel_free(void *addr)
402{
403	kvfree(addr);
404}
405
406static void fq_codel_destroy(struct Qdisc *sch)
407{
408	struct fq_codel_sched_data *q = qdisc_priv(sch);
409
410	tcf_destroy_chain(&q->filter_list);
411	fq_codel_free(q->backlogs);
412	fq_codel_free(q->flows);
413}
414
415static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
416{
417	struct fq_codel_sched_data *q = qdisc_priv(sch);
418	int i;
419
420	sch->limit = 10*1024;
421	q->flows_cnt = 1024;
422	q->quantum = psched_mtu(qdisc_dev(sch));
423	q->perturbation = prandom_u32();
424	INIT_LIST_HEAD(&q->new_flows);
425	INIT_LIST_HEAD(&q->old_flows);
426	codel_params_init(&q->cparams, sch);
427	codel_stats_init(&q->cstats);
428	q->cparams.ecn = true;
429
430	if (opt) {
431		int err = fq_codel_change(sch, opt);
432		if (err)
433			return err;
434	}
435
436	if (!q->flows) {
437		q->flows = fq_codel_zalloc(q->flows_cnt *
438					   sizeof(struct fq_codel_flow));
439		if (!q->flows)
440			return -ENOMEM;
441		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
442		if (!q->backlogs) {
443			fq_codel_free(q->flows);
444			return -ENOMEM;
445		}
446		for (i = 0; i < q->flows_cnt; i++) {
447			struct fq_codel_flow *flow = q->flows + i;
448
449			INIT_LIST_HEAD(&flow->flowchain);
450			codel_vars_init(&flow->cvars);
451		}
452	}
453	if (sch->limit >= 1)
454		sch->flags |= TCQ_F_CAN_BYPASS;
455	else
456		sch->flags &= ~TCQ_F_CAN_BYPASS;
457	return 0;
458}
459
460static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
461{
462	struct fq_codel_sched_data *q = qdisc_priv(sch);
463	struct nlattr *opts;
464
465	opts = nla_nest_start(skb, TCA_OPTIONS);
466	if (opts == NULL)
467		goto nla_put_failure;
468
469	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
470			codel_time_to_us(q->cparams.target)) ||
471	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
472			sch->limit) ||
473	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
474			codel_time_to_us(q->cparams.interval)) ||
475	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
476			q->cparams.ecn) ||
477	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
478			q->quantum) ||
479	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
480			q->flows_cnt))
481		goto nla_put_failure;
482
483	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
484	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
485			codel_time_to_us(q->cparams.ce_threshold)))
486		goto nla_put_failure;
487
488	return nla_nest_end(skb, opts);
489
490nla_put_failure:
491	return -1;
492}
493
494static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
495{
496	struct fq_codel_sched_data *q = qdisc_priv(sch);
497	struct tc_fq_codel_xstats st = {
498		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
499	};
500	struct list_head *pos;
501
502	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
503	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
504	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
505	st.qdisc_stats.new_flow_count = q->new_flow_count;
506	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
507
508	list_for_each(pos, &q->new_flows)
509		st.qdisc_stats.new_flows_len++;
510
511	list_for_each(pos, &q->old_flows)
512		st.qdisc_stats.old_flows_len++;
513
514	return gnet_stats_copy_app(d, &st, sizeof(st));
515}
516
517static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
518{
519	return NULL;
520}
521
522static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
523{
524	return 0;
525}
526
527static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
528			      u32 classid)
529{
530	/* we cannot bypass queue discipline anymore */
531	sch->flags &= ~TCQ_F_CAN_BYPASS;
532	return 0;
533}
534
535static void fq_codel_put(struct Qdisc *q, unsigned long cl)
536{
537}
538
539static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
540						  unsigned long cl)
541{
542	struct fq_codel_sched_data *q = qdisc_priv(sch);
543
544	if (cl)
545		return NULL;
546	return &q->filter_list;
547}
548
549static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
550			  struct sk_buff *skb, struct tcmsg *tcm)
551{
552	tcm->tcm_handle |= TC_H_MIN(cl);
553	return 0;
554}
555
556static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
557				     struct gnet_dump *d)
558{
559	struct fq_codel_sched_data *q = qdisc_priv(sch);
560	u32 idx = cl - 1;
561	struct gnet_stats_queue qs = { 0 };
562	struct tc_fq_codel_xstats xstats;
563
564	if (idx < q->flows_cnt) {
565		const struct fq_codel_flow *flow = &q->flows[idx];
566		const struct sk_buff *skb = flow->head;
567
568		memset(&xstats, 0, sizeof(xstats));
569		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
570		xstats.class_stats.deficit = flow->deficit;
571		xstats.class_stats.ldelay =
572			codel_time_to_us(flow->cvars.ldelay);
573		xstats.class_stats.count = flow->cvars.count;
574		xstats.class_stats.lastcount = flow->cvars.lastcount;
575		xstats.class_stats.dropping = flow->cvars.dropping;
576		if (flow->cvars.dropping) {
577			codel_tdiff_t delta = flow->cvars.drop_next -
578					      codel_get_time();
579
580			xstats.class_stats.drop_next = (delta >= 0) ?
581				codel_time_to_us(delta) :
582				-codel_time_to_us(-delta);
583		}
584		while (skb) {
585			qs.qlen++;
586			skb = skb->next;
587		}
588		qs.backlog = q->backlogs[idx];
589		qs.drops = flow->dropped;
590	}
591	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
592		return -1;
593	if (idx < q->flows_cnt)
594		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
595	return 0;
596}
597
598static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
599{
600	struct fq_codel_sched_data *q = qdisc_priv(sch);
601	unsigned int i;
602
603	if (arg->stop)
604		return;
605
606	for (i = 0; i < q->flows_cnt; i++) {
607		if (list_empty(&q->flows[i].flowchain) ||
608		    arg->count < arg->skip) {
609			arg->count++;
610			continue;
611		}
612		if (arg->fn(sch, i + 1, arg) < 0) {
613			arg->stop = 1;
614			break;
615		}
616		arg->count++;
617	}
618}
619
620static const struct Qdisc_class_ops fq_codel_class_ops = {
621	.leaf		=	fq_codel_leaf,
622	.get		=	fq_codel_get,
623	.put		=	fq_codel_put,
624	.tcf_chain	=	fq_codel_find_tcf,
625	.bind_tcf	=	fq_codel_bind,
626	.unbind_tcf	=	fq_codel_put,
627	.dump		=	fq_codel_dump_class,
628	.dump_stats	=	fq_codel_dump_class_stats,
629	.walk		=	fq_codel_walk,
630};
631
632static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
633	.cl_ops		=	&fq_codel_class_ops,
634	.id		=	"fq_codel",
635	.priv_size	=	sizeof(struct fq_codel_sched_data),
636	.enqueue	=	fq_codel_enqueue,
637	.dequeue	=	fq_codel_dequeue,
638	.peek		=	qdisc_peek_dequeued,
639	.drop		=	fq_codel_qdisc_drop,
640	.init		=	fq_codel_init,
641	.reset		=	fq_codel_reset,
642	.destroy	=	fq_codel_destroy,
643	.change		=	fq_codel_change,
644	.dump		=	fq_codel_dump,
645	.dump_stats =	fq_codel_dump_stats,
646	.owner		=	THIS_MODULE,
647};
648
649static int __init fq_codel_module_init(void)
650{
651	return register_qdisc(&fq_codel_qdisc_ops);
652}
653
654static void __exit fq_codel_module_exit(void)
655{
656	unregister_qdisc(&fq_codel_qdisc_ops);
657}
658
659module_init(fq_codel_module_init)
660module_exit(fq_codel_module_exit)
661MODULE_AUTHOR("Eric Dumazet");
662MODULE_LICENSE("GPL");