Linux v5.9: net/sched/sch_fq_codel.c
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Fair Queue CoDel discipline
  4 *
  5 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/types.h>
 10#include <linux/kernel.h>
 11#include <linux/jiffies.h>
 12#include <linux/string.h>
 13#include <linux/in.h>
 14#include <linux/errno.h>
 15#include <linux/init.h>
 16#include <linux/skbuff.h>
 17#include <linux/slab.h>
 18#include <linux/vmalloc.h>
 19#include <net/netlink.h>
 20#include <net/pkt_sched.h>
 21#include <net/pkt_cls.h>
 22#include <net/codel.h>
 23#include <net/codel_impl.h>
 24#include <net/codel_qdisc.h>
 25
 26/*	Fair Queue CoDel.
 27 *
 28 * Principles :
 29 * Packets are classified (internal classifier or external) on flows.
 30 * This is a Stochastic model (as we use a hash, several flows
 31 *			       might be hashed on same slot)
 32 * Each flow has a CoDel managed queue.
 33 * Flows are linked onto two (Round Robin) lists,
 34 * so that new flows have priority on old ones.
 35 *
 36 * For a given flow, packets are not reordered (CoDel uses a FIFO)
 37 * head drops only.
 38 * ECN capability is on by default.
 39 * Low memory footprint (64 bytes per flow)
 40 */
 41
 42struct fq_codel_flow {
 43	struct sk_buff	  *head;
 44	struct sk_buff	  *tail;
 45	struct list_head  flowchain;
 46	int		  deficit;
 47	struct codel_vars cvars;
 48}; /* please try to keep this structure <= 64 bytes */
 49
 50struct fq_codel_sched_data {
 51	struct tcf_proto __rcu *filter_list; /* optional external classifier */
 52	struct tcf_block *block;
 53	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
 54	u32		*backlogs;	/* backlog table [flows_cnt] */
 55	u32		flows_cnt;	/* number of flows */
 56	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
 57	u32		drop_batch_size;
 58	u32		memory_limit;
 59	struct codel_params cparams;
 60	struct codel_stats cstats;
 61	u32		memory_usage;
 62	u32		drop_overmemory;
 63	u32		drop_overlimit;
 64	u32		new_flow_count;
 65
 66	struct list_head new_flows;	/* list of new flows */
 67	struct list_head old_flows;	/* list of old flows */
 68};
 69
 70static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
 71				  struct sk_buff *skb)
 72{
 73	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
 74}
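
fq_codel_hash() above maps the skb flow hash onto one of flows_cnt buckets with reciprocal_scale(), a multiply-and-shift that spreads a 32-bit value uniformly over [0, n) without a division. Because several flows can hash to the same bucket, fairness is stochastic, exactly as the header comment says. A minimal userspace sketch of that mapping (an illustration only, not part of the kernel file; the sample hash values are made up):

#include <stdint.h>
#include <stdio.h>

/* same multiply-and-shift the kernel uses: map a 32-bit hash onto [0, n) */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	uint32_t flows_cnt = 1024;	/* default number of flow buckets */
	uint32_t sample_hashes[] = { 0x12345678u, 0xdeadbeefu, 0xffffffffu };

	for (size_t i = 0; i < sizeof(sample_hashes) / sizeof(sample_hashes[0]); i++)
		printf("hash 0x%08x -> bucket %u\n",
		       sample_hashes[i],
		       reciprocal_scale(sample_hashes[i], flows_cnt));
	return 0;
}
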
 75
 76static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
 77				      int *qerr)
 78{
 79	struct fq_codel_sched_data *q = qdisc_priv(sch);
 80	struct tcf_proto *filter;
 81	struct tcf_result res;
 82	int result;
 83
 84	if (TC_H_MAJ(skb->priority) == sch->handle &&
 85	    TC_H_MIN(skb->priority) > 0 &&
 86	    TC_H_MIN(skb->priority) <= q->flows_cnt)
 87		return TC_H_MIN(skb->priority);
 88
 89	filter = rcu_dereference_bh(q->filter_list);
 90	if (!filter)
 91		return fq_codel_hash(q, skb) + 1;
 92
 93	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 94	result = tcf_classify(skb, filter, &res, false);
 95	if (result >= 0) {
 96#ifdef CONFIG_NET_CLS_ACT
 97		switch (result) {
 98		case TC_ACT_STOLEN:
 99		case TC_ACT_QUEUED:
100		case TC_ACT_TRAP:
101			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
102			fallthrough;
103		case TC_ACT_SHOT:
104			return 0;
105		}
106#endif
107		if (TC_H_MIN(res.classid) <= q->flows_cnt)
108			return TC_H_MIN(res.classid);
109	}
110	return 0;
111}
112
113/* helper functions : might be changed when/if skb use a standard list_head */
114
115/* remove one skb from head of slot queue */
116static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
117{
118	struct sk_buff *skb = flow->head;
119
120	flow->head = skb->next;
121	skb_mark_not_on_list(skb);
122	return skb;
123}
124
125/* add skb to flow queue (tail add) */
126static inline void flow_queue_add(struct fq_codel_flow *flow,
127				  struct sk_buff *skb)
128{
129	if (flow->head == NULL)
130		flow->head = skb;
131	else
132		flow->tail->next = skb;
133	flow->tail = skb;
134	skb->next = NULL;
135}
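
Each flow queue is a bare singly linked list of skbs tracked by head and tail pointers: dequeue_head() pops from the front, flow_queue_add() appends at the back, so per-flow delivery stays strictly FIFO. The same head/tail FIFO in standalone form (the generic node type is invented for this sketch; the kernel operates on struct sk_buff directly):

#include <stdio.h>

struct node {
	struct node *next;
};

struct fifo {
	struct node *head;
	struct node *tail;
};

/* pop from the front; caller must ensure the queue is not empty,
 * just as dequeue_head() is only called on a non-empty flow
 */
static struct node *fifo_pop(struct fifo *q)
{
	struct node *n = q->head;

	q->head = n->next;
	n->next = NULL;	/* mirrors skb_mark_not_on_list() */
	return n;
}

/* append at the tail, starting the list if it was empty */
static void fifo_push(struct fifo *q, struct node *n)
{
	if (!q->head)
		q->head = n;
	else
		q->tail->next = n;
	q->tail = n;
	n->next = NULL;
}

int main(void)
{
	struct fifo q = { 0 };
	struct node nodes[3];

	for (int i = 0; i < 3; i++)
		fifo_push(&q, &nodes[i]);
	while (q.head)
		printf("popped node %ld\n", (long)(fifo_pop(&q) - nodes));
	return 0;
}
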
136
137static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
138				  struct sk_buff **to_free)
139{
140	struct fq_codel_sched_data *q = qdisc_priv(sch);
141	struct sk_buff *skb;
142	unsigned int maxbacklog = 0, idx = 0, i, len;
143	struct fq_codel_flow *flow;
144	unsigned int threshold;
145	unsigned int mem = 0;
146
147	/* Queue is full! Find the fat flow and drop packet(s) from it.
148	 * This might sound expensive, but with 1024 flows, we scan
149	 * 4KB of memory, and we dont need to handle a complex tree
150	 * in fast path (packet queue/enqueue) with many cache misses.
151	 * In stress mode, we'll try to drop 64 packets from the flow,
152	 * amortizing this linear lookup to one cache line per drop.
153	 */
154	for (i = 0; i < q->flows_cnt; i++) {
155		if (q->backlogs[i] > maxbacklog) {
156			maxbacklog = q->backlogs[i];
157			idx = i;
158		}
159	}
160
161	/* Our goal is to drop half of this fat flow backlog */
162	threshold = maxbacklog >> 1;
163
164	flow = &q->flows[idx];
165	len = 0;
166	i = 0;
167	do {
168		skb = dequeue_head(flow);
169		len += qdisc_pkt_len(skb);
170		mem += get_codel_cb(skb)->mem_usage;
171		__qdisc_drop(skb, to_free);
172	} while (++i < max_packets && len < threshold);
173
174	/* Tell codel to increase its signal strength also */
175	flow->cvars.count += i;
176	q->backlogs[idx] -= len;
177	q->memory_usage -= mem;
178	sch->qstats.drops += i;
179	sch->qstats.backlog -= len;
180	sch->q.qlen -= i;
181	return idx;
182}
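
fq_codel_drop() is the overflow path: it scans q->backlogs[] linearly for the flow with the largest byte backlog, then head-drops packets from that flow until either half of its backlog is freed or max_packets (the drop batch) have been removed. With the defaults of 1024 flows and a 64 packet batch, that is the "scan 4KB, one cache line per drop" amortization the comment above describes. A self-contained model of the policy, assuming a plain array of per-flow byte backlogs and a fixed packet size (both invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Find the fattest flow and emulate dropping from its head until half of
 * its backlog is gone or max_packets have been removed. Returns its index.
 */
static unsigned int drop_from_fattest(uint32_t *backlog, unsigned int flows,
				      unsigned int pkt_len, unsigned int max_packets)
{
	uint32_t maxbacklog = 0;
	unsigned int idx = 0, i = 0, freed = 0, threshold;

	for (unsigned int f = 0; f < flows; f++) {
		if (backlog[f] > maxbacklog) {
			maxbacklog = backlog[f];
			idx = f;
		}
	}
	threshold = maxbacklog >> 1;		/* goal: free half of the fat flow */
	do {
		freed += pkt_len;
		i++;
	} while (i < max_packets && freed < threshold);
	backlog[idx] -= (freed < backlog[idx]) ? freed : backlog[idx];
	return idx;
}

int main(void)
{
	uint32_t backlog[4] = { 1500, 64000, 3000, 9000 };
	unsigned int idx = drop_from_fattest(backlog, 4, 1500, 64);

	printf("dropped from flow %u, %u bytes left in it\n", idx, backlog[idx]);
	return 0;
}
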
183
184static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
185			    struct sk_buff **to_free)
186{
187	struct fq_codel_sched_data *q = qdisc_priv(sch);
188	unsigned int idx, prev_backlog, prev_qlen;
189	struct fq_codel_flow *flow;
190	int ret;
191	unsigned int pkt_len;
192	bool memory_limited;
193
194	idx = fq_codel_classify(skb, sch, &ret);
195	if (idx == 0) {
196		if (ret & __NET_XMIT_BYPASS)
197			qdisc_qstats_drop(sch);
198		__qdisc_drop(skb, to_free);
199		return ret;
200	}
201	idx--;
202
203	codel_set_enqueue_time(skb);
204	flow = &q->flows[idx];
205	flow_queue_add(flow, skb);
206	q->backlogs[idx] += qdisc_pkt_len(skb);
207	qdisc_qstats_backlog_inc(sch, skb);
208
209	if (list_empty(&flow->flowchain)) {
210		list_add_tail(&flow->flowchain, &q->new_flows);
211		q->new_flow_count++;
212		flow->deficit = q->quantum;
213	}
214	get_codel_cb(skb)->mem_usage = skb->truesize;
215	q->memory_usage += get_codel_cb(skb)->mem_usage;
216	memory_limited = q->memory_usage > q->memory_limit;
217	if (++sch->q.qlen <= sch->limit && !memory_limited)
218		return NET_XMIT_SUCCESS;
219
220	prev_backlog = sch->qstats.backlog;
221	prev_qlen = sch->q.qlen;
222
223	/* save this packet length as it might be dropped by fq_codel_drop() */
224	pkt_len = qdisc_pkt_len(skb);
225	/* fq_codel_drop() is quite expensive, as it performs a linear search
226	 * in q->backlogs[] to find a fat flow.
227	 * So instead of dropping a single packet, drop half of its backlog
228	 * with a 64 packets limit to not add a too big cpu spike here.
229	 */
230	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
231
232	prev_qlen -= sch->q.qlen;
233	prev_backlog -= sch->qstats.backlog;
234	q->drop_overlimit += prev_qlen;
235	if (memory_limited)
236		q->drop_overmemory += prev_qlen;
237
238	/* As we dropped packet(s), better let upper stack know this.
239	 * If we dropped a packet for this flow, return NET_XMIT_CN,
240	 * but in this case, our parents wont increase their backlogs.
241	 */
242	if (ret == idx) {
243		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
244					  prev_backlog - pkt_len);
245		return NET_XMIT_CN;
246	}
247	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
248	return NET_XMIT_SUCCESS;
249}
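
On overflow, fq_codel_enqueue() snapshots sch->q.qlen and sch->qstats.backlog before the batched drop and subtracts the post-drop values afterwards, so the exact number of packets and bytes removed is known without fq_codel_drop() having to report it; if the victim flow turns out to be the one the new packet joined, the function returns NET_XMIT_CN, which tells parent qdiscs not to account the packet in their own backlogs. A toy illustration of the before/after delta bookkeeping (all numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned int qlen = 10241, backlog = 14000000;		/* before the batch drop */
	unsigned int qlen_after = 10177, backlog_after = 13904000;

	unsigned int dropped_pkts  = qlen - qlen_after;		/* prev_qlen -= sch->q.qlen */
	unsigned int dropped_bytes = backlog - backlog_after;	/* prev_backlog -= sch->qstats.backlog */

	printf("batch removed %u packets / %u bytes\n", dropped_pkts, dropped_bytes);
	return 0;
}
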
250
251/* This is the specific function called from codel_dequeue()
252 * to dequeue a packet from queue. Note: backlog is handled in
253 * codel, we dont need to reduce it here.
254 */
255static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
256{
257	struct Qdisc *sch = ctx;
258	struct fq_codel_sched_data *q = qdisc_priv(sch);
259	struct fq_codel_flow *flow;
260	struct sk_buff *skb = NULL;
261
262	flow = container_of(vars, struct fq_codel_flow, cvars);
263	if (flow->head) {
264		skb = dequeue_head(flow);
265		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
266		q->memory_usage -= get_codel_cb(skb)->mem_usage;
267		sch->q.qlen--;
268		sch->qstats.backlog -= qdisc_pkt_len(skb);
269	}
270	return skb;
271}
272
273static void drop_func(struct sk_buff *skb, void *ctx)
274{
275	struct Qdisc *sch = ctx;
276
277	kfree_skb(skb);
278	qdisc_qstats_drop(sch);
279}
280
281static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
282{
283	struct fq_codel_sched_data *q = qdisc_priv(sch);
284	struct sk_buff *skb;
285	struct fq_codel_flow *flow;
286	struct list_head *head;
287
288begin:
289	head = &q->new_flows;
290	if (list_empty(head)) {
291		head = &q->old_flows;
292		if (list_empty(head))
293			return NULL;
294	}
295	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
296
297	if (flow->deficit <= 0) {
298		flow->deficit += q->quantum;
299		list_move_tail(&flow->flowchain, &q->old_flows);
300		goto begin;
301	}
302
303	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
304			    &flow->cvars, &q->cstats, qdisc_pkt_len,
305			    codel_get_enqueue_time, drop_func, dequeue_func);
306
307	if (!skb) {
308		/* force a pass through old_flows to prevent starvation */
309		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
310			list_move_tail(&flow->flowchain, &q->old_flows);
311		else
312			list_del_init(&flow->flowchain);
313		goto begin;
314	}
315	qdisc_bstats_update(sch, skb);
316	flow->deficit -= qdisc_pkt_len(skb);
317	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
318	 * or HTB crashes. Defer it for next round.
319	 */
320	if (q->cstats.drop_count && sch->q.qlen) {
321		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
322					  q->cstats.drop_len);
323		q->cstats.drop_count = 0;
324		q->cstats.drop_len = 0;
325	}
326	return skb;
327}
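
fq_codel_dequeue() is deficit round robin over two lists: new flows are served ahead of old ones, a flow may transmit while its deficit is positive, and a flow whose deficit runs out is recharged by one quantum and rotated to the tail of old_flows; a new flow that empties is forced through old_flows once so that long-lived flows are not starved. A compressed userspace model of the deficit/quantum rotation, using a single ring of flows instead of the kernel's two lists (flow layout, packet counts and packet size are invented for this sketch):

#include <stdio.h>

#define QUANTUM	1514
#define PKT_LEN	1000
#define NFLOWS	3

struct flow {
	int deficit;	/* byte credit, refilled by one quantum per rotation */
	int pkts;	/* queued packets, all PKT_LEN bytes for simplicity */
};

int main(void)
{
	/* each flow starts with one quantum of credit, like a freshly added flow */
	struct flow flows[NFLOWS] = { { QUANTUM, 3 }, { QUANTUM, 1 }, { QUANTUM, 5 } };
	int remaining = 3 + 1 + 5, cur = 0;

	while (remaining) {
		struct flow *f = &flows[cur];

		if (f->deficit <= 0) {		/* out of credit: refill and rotate */
			f->deficit += QUANTUM;
			cur = (cur + 1) % NFLOWS;
			continue;
		}
		if (!f->pkts) {			/* nothing queued: just rotate */
			cur = (cur + 1) % NFLOWS;
			continue;
		}
		f->pkts--;
		f->deficit -= PKT_LEN;
		remaining--;
		printf("dequeued %d bytes from flow %d (deficit now %d)\n",
		       PKT_LEN, cur, f->deficit);
	}
	return 0;
}
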
328
329static void fq_codel_flow_purge(struct fq_codel_flow *flow)
330{
331	rtnl_kfree_skbs(flow->head, flow->tail);
332	flow->head = NULL;
333}
334
335static void fq_codel_reset(struct Qdisc *sch)
336{
337	struct fq_codel_sched_data *q = qdisc_priv(sch);
338	int i;
339
340	INIT_LIST_HEAD(&q->new_flows);
341	INIT_LIST_HEAD(&q->old_flows);
342	for (i = 0; i < q->flows_cnt; i++) {
343		struct fq_codel_flow *flow = q->flows + i;
344
345		fq_codel_flow_purge(flow);
346		INIT_LIST_HEAD(&flow->flowchain);
347		codel_vars_init(&flow->cvars);
348	}
349	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
350	sch->q.qlen = 0;
351	sch->qstats.backlog = 0;
352	q->memory_usage = 0;
353}
354
355static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
356	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
357	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
358	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
359	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
360	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
361	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
362	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
363	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
364	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
365};
366
367static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
368			   struct netlink_ext_ack *extack)
369{
370	struct fq_codel_sched_data *q = qdisc_priv(sch);
371	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
372	int err;
373
374	if (!opt)
375		return -EINVAL;
376
377	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
378					  fq_codel_policy, NULL);
379	if (err < 0)
380		return err;
381	if (tb[TCA_FQ_CODEL_FLOWS]) {
382		if (q->flows)
383			return -EINVAL;
384		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
385		if (!q->flows_cnt ||
386		    q->flows_cnt > 65536)
387			return -EINVAL;
388	}
389	sch_tree_lock(sch);
390
391	if (tb[TCA_FQ_CODEL_TARGET]) {
392		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
393
394		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
395	}
396
397	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
398		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
399
400		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
401	}
402
403	if (tb[TCA_FQ_CODEL_INTERVAL]) {
404		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
405
406		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
407	}
408
409	if (tb[TCA_FQ_CODEL_LIMIT])
410		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
411
412	if (tb[TCA_FQ_CODEL_ECN])
413		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
414
415	if (tb[TCA_FQ_CODEL_QUANTUM])
416		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
417
418	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
419		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
420
421	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
422		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
423
424	while (sch->q.qlen > sch->limit ||
425	       q->memory_usage > q->memory_limit) {
426		struct sk_buff *skb = fq_codel_dequeue(sch);
427
428		q->cstats.drop_len += qdisc_pkt_len(skb);
429		rtnl_kfree_skbs(skb, skb);
430		q->cstats.drop_count++;
431	}
432	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
433	q->cstats.drop_count = 0;
434	q->cstats.drop_len = 0;
435
436	sch_tree_unlock(sch);
437	return 0;
438}
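
TCA_FQ_CODEL_TARGET, TCA_FQ_CODEL_INTERVAL and TCA_FQ_CODEL_CE_THRESHOLD arrive from userspace in microseconds (this is what a command along the lines of "tc qdisc add dev eth0 root fq_codel target 5ms interval 100ms ecn" ends up sending) and are converted here into CoDel's internal time unit, nanoseconds shifted right by CODEL_SHIFT (10), i.e. units of roughly 1.024 us. Keeping times as 32-bit shifted nanoseconds keeps the arithmetic in u32 while still covering roughly 73 minutes of range. A standalone example of the conversion, with the kernel constants written out:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define CODEL_SHIFT	10	/* one codel time unit = 2^10 ns */

int main(void)
{
	uint32_t target_us = 5000;	/* "target 5ms" */
	uint32_t interval_us = 100000;	/* "interval 100ms" */

	uint32_t target   = (uint32_t)(((uint64_t)target_us   * NSEC_PER_USEC) >> CODEL_SHIFT);
	uint32_t interval = (uint32_t)(((uint64_t)interval_us * NSEC_PER_USEC) >> CODEL_SHIFT);

	printf("target   = %u codel units (~%u us)\n", target, (target << CODEL_SHIFT) / 1000);
	printf("interval = %u codel units (~%u us)\n", interval, (interval << CODEL_SHIFT) / 1000);
	return 0;
}
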
439
440static void fq_codel_destroy(struct Qdisc *sch)
441{
442	struct fq_codel_sched_data *q = qdisc_priv(sch);
443
444	tcf_block_put(q->block);
445	kvfree(q->backlogs);
446	kvfree(q->flows);
447}
448
449static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
450			 struct netlink_ext_ack *extack)
451{
452	struct fq_codel_sched_data *q = qdisc_priv(sch);
453	int i;
454	int err;
455
456	sch->limit = 10*1024;
457	q->flows_cnt = 1024;
458	q->memory_limit = 32 << 20; /* 32 MBytes */
459	q->drop_batch_size = 64;
460	q->quantum = psched_mtu(qdisc_dev(sch));
461	INIT_LIST_HEAD(&q->new_flows);
462	INIT_LIST_HEAD(&q->old_flows);
463	codel_params_init(&q->cparams);
464	codel_stats_init(&q->cstats);
465	q->cparams.ecn = true;
466	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
467
468	if (opt) {
469		err = fq_codel_change(sch, opt, extack);
470		if (err)
471			goto init_failure;
472	}
473
474	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
475	if (err)
476		goto init_failure;
477
478	if (!q->flows) {
479		q->flows = kvcalloc(q->flows_cnt,
480				    sizeof(struct fq_codel_flow),
481				    GFP_KERNEL);
482		if (!q->flows) {
483			err = -ENOMEM;
484			goto init_failure;
485		}
486		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
487		if (!q->backlogs) {
488			err = -ENOMEM;
489			goto alloc_failure;
490		}
491		for (i = 0; i < q->flows_cnt; i++) {
492			struct fq_codel_flow *flow = q->flows + i;
493
494			INIT_LIST_HEAD(&flow->flowchain);
495			codel_vars_init(&flow->cvars);
496		}
497	}
498	if (sch->limit >= 1)
499		sch->flags |= TCQ_F_CAN_BYPASS;
500	else
501		sch->flags &= ~TCQ_F_CAN_BYPASS;
502	return 0;
503
504alloc_failure:
505	kvfree(q->flows);
506	q->flows = NULL;
507init_failure:
508	q->flows_cnt = 0;
509	return err;
510}
511
512static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
513{
514	struct fq_codel_sched_data *q = qdisc_priv(sch);
515	struct nlattr *opts;
516
517	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
518	if (opts == NULL)
519		goto nla_put_failure;
520
521	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
522			codel_time_to_us(q->cparams.target)) ||
523	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
524			sch->limit) ||
525	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
526			codel_time_to_us(q->cparams.interval)) ||
527	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
528			q->cparams.ecn) ||
529	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
530			q->quantum) ||
531	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
532			q->drop_batch_size) ||
533	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
534			q->memory_limit) ||
535	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
536			q->flows_cnt))
537		goto nla_put_failure;
538
539	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
540	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
541			codel_time_to_us(q->cparams.ce_threshold)))
542		goto nla_put_failure;
543
544	return nla_nest_end(skb, opts);
545
546nla_put_failure:
547	return -1;
548}
549
550static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
551{
552	struct fq_codel_sched_data *q = qdisc_priv(sch);
553	struct tc_fq_codel_xstats st = {
554		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
555	};
556	struct list_head *pos;
557
558	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
559	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
560	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
561	st.qdisc_stats.new_flow_count = q->new_flow_count;
562	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
563	st.qdisc_stats.memory_usage  = q->memory_usage;
564	st.qdisc_stats.drop_overmemory = q->drop_overmemory;
565
566	sch_tree_lock(sch);
567	list_for_each(pos, &q->new_flows)
568		st.qdisc_stats.new_flows_len++;
569
570	list_for_each(pos, &q->old_flows)
571		st.qdisc_stats.old_flows_len++;
572	sch_tree_unlock(sch);
573
574	return gnet_stats_copy_app(d, &st, sizeof(st));
575}
576
577static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
578{
579	return NULL;
580}
581
582static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
583{
584	return 0;
585}
586
587static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
588			      u32 classid)
589{
590	return 0;
591}
592
593static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
594{
595}
596
597static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
598					    struct netlink_ext_ack *extack)
599{
600	struct fq_codel_sched_data *q = qdisc_priv(sch);
601
602	if (cl)
603		return NULL;
604	return q->block;
605}
606
607static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
608			  struct sk_buff *skb, struct tcmsg *tcm)
609{
610	tcm->tcm_handle |= TC_H_MIN(cl);
611	return 0;
612}
613
614static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
615				     struct gnet_dump *d)
616{
617	struct fq_codel_sched_data *q = qdisc_priv(sch);
618	u32 idx = cl - 1;
619	struct gnet_stats_queue qs = { 0 };
620	struct tc_fq_codel_xstats xstats;
621
622	if (idx < q->flows_cnt) {
623		const struct fq_codel_flow *flow = &q->flows[idx];
624		const struct sk_buff *skb;
625
626		memset(&xstats, 0, sizeof(xstats));
627		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
628		xstats.class_stats.deficit = flow->deficit;
629		xstats.class_stats.ldelay =
630			codel_time_to_us(flow->cvars.ldelay);
631		xstats.class_stats.count = flow->cvars.count;
632		xstats.class_stats.lastcount = flow->cvars.lastcount;
633		xstats.class_stats.dropping = flow->cvars.dropping;
634		if (flow->cvars.dropping) {
635			codel_tdiff_t delta = flow->cvars.drop_next -
636					      codel_get_time();
637
638			xstats.class_stats.drop_next = (delta >= 0) ?
639				codel_time_to_us(delta) :
640				-codel_time_to_us(-delta);
641		}
642		if (flow->head) {
643			sch_tree_lock(sch);
644			skb = flow->head;
645			while (skb) {
646				qs.qlen++;
647				skb = skb->next;
648			}
649			sch_tree_unlock(sch);
650		}
651		qs.backlog = q->backlogs[idx];
652		qs.drops = 0;
653	}
654	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
655		return -1;
656	if (idx < q->flows_cnt)
657		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
658	return 0;
659}
660
661static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
662{
663	struct fq_codel_sched_data *q = qdisc_priv(sch);
664	unsigned int i;
665
666	if (arg->stop)
667		return;
668
669	for (i = 0; i < q->flows_cnt; i++) {
670		if (list_empty(&q->flows[i].flowchain) ||
671		    arg->count < arg->skip) {
672			arg->count++;
673			continue;
674		}
675		if (arg->fn(sch, i + 1, arg) < 0) {
676			arg->stop = 1;
677			break;
678		}
679		arg->count++;
680	}
681}
682
683static const struct Qdisc_class_ops fq_codel_class_ops = {
684	.leaf		=	fq_codel_leaf,
685	.find		=	fq_codel_find,
686	.tcf_block	=	fq_codel_tcf_block,
687	.bind_tcf	=	fq_codel_bind,
688	.unbind_tcf	=	fq_codel_unbind,
689	.dump		=	fq_codel_dump_class,
690	.dump_stats	=	fq_codel_dump_class_stats,
691	.walk		=	fq_codel_walk,
692};
693
694static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
695	.cl_ops		=	&fq_codel_class_ops,
696	.id		=	"fq_codel",
697	.priv_size	=	sizeof(struct fq_codel_sched_data),
698	.enqueue	=	fq_codel_enqueue,
699	.dequeue	=	fq_codel_dequeue,
700	.peek		=	qdisc_peek_dequeued,
701	.init		=	fq_codel_init,
702	.reset		=	fq_codel_reset,
703	.destroy	=	fq_codel_destroy,
704	.change		=	fq_codel_change,
705	.dump		=	fq_codel_dump,
706	.dump_stats =	fq_codel_dump_stats,
707	.owner		=	THIS_MODULE,
708};
709
710static int __init fq_codel_module_init(void)
711{
712	return register_qdisc(&fq_codel_qdisc_ops);
713}
714
715static void __exit fq_codel_module_exit(void)
716{
717	unregister_qdisc(&fq_codel_qdisc_ops);
718}
719
720module_init(fq_codel_module_init)
721module_exit(fq_codel_module_exit)
722MODULE_AUTHOR("Eric Dumazet");
723MODULE_LICENSE("GPL");
724MODULE_DESCRIPTION("Fair Queue CoDel discipline");
Linux v5.4: the same qdisc implementation; the main differences from the v5.9 listing above are an extra #include <linux/jhash.h>, a /* fall through */ comment where v5.9 uses the fallthrough keyword, and an uninitialized_var() annotation on the ret variable in fq_codel_enqueue().