// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified onto flows (by the internal classifier or an
 * external one). This is a stochastic model: since a hash is used,
 * several flows might be hashed to the same slot.
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (round-robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
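/* Example (illustrative only) configuration from user space, matching the
 * netlink attributes parsed below; the parameter values are arbitrary:
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *           quantum 1514 target 5ms interval 100ms ecn
 */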

struct fq_codel_flow {
	struct sk_buff *head;
	struct sk_buff *tail;
	struct list_head flowchain;
	int deficit;
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32 *backlogs;			/* backlog table [flows_cnt] */
	u32 flows_cnt;			/* number of flows */
	u32 quantum;			/* psched_mtu(qdisc_dev(sch)); */
	u32 drop_batch_size;
	u32 memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32 memory_usage;
	u32 drop_overmemory;
	u32 drop_overlimit;
	u32 new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

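/* Hash the skb flow key into one of the flows_cnt buckets. */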
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

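/* Map an skb to a flow index in [1, flows_cnt]; 0 means the packet must be
 * dropped. skb->priority may select a flow directly; otherwise the optional
 * tcf classifier chain is consulted, falling back to the flow hash when no
 * filter is attached.
 */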
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	/* Tell CoDel to increase its signal strength also */
	flow->cvars.count += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

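/* Enqueue entry point: classify the packet onto a flow, append it to that
 * flow's FIFO and account its length and truesize. If the packet or memory
 * limit is exceeded, drop a batch from the fattest flow instead of the
 * arriving packet.
 */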
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int ret;
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog,
	 * with a limit of 64 packets, to avoid adding too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let the upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from a queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

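/* Drop callback passed to codel_dequeue(); frees the skb and records it in
 * this qdisc's drop statistics.
 */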
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

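/* Dequeue entry point: deficit round-robin over the new_flows and old_flows
 * lists, refilling one quantum of deficit per round; CoDel is run on the
 * selected flow's queue via codel_dequeue().
 */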
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for the next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

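/* Drop all queued packets and reinitialise every flow and its CoDel state. */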
static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};

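/* Apply netlink configuration changes (target, interval, limit, flows,
 * quantum, ECN, CE threshold, drop batch size, memory limit), then shrink
 * the queue if the new limits are already exceeded.
 */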
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	u32 quantum = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	if (tb[TCA_FQ_CODEL_QUANTUM]) {
		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
		if (quantum > FQ_CODEL_QUANTUM_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid quantum");
			return -EINVAL;
		}
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
		q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
		q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (quantum)
		q->quantum = quantum;

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

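/* Initialise the qdisc with its defaults (10240 packet limit, 1024 flows,
 * 32 MB memory limit, drop batch of 64, quantum = interface MTU), apply any
 * netlink options, then allocate the flow and backlog tables.
 */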
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

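/* Dump the current configuration as netlink attributes. */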
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
				codel_time_to_us(q->cparams.ce_threshold)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

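/* Dump qdisc-level statistics (drops, ECN/CE marks, memory usage and the
 * current lengths of the new/old flow lists).
 */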
static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type = TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

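/* fq_codel exposes each flow as a read-only pseudo class so that per-flow
 * statistics can be dumped; classes cannot be configured or given child
 * qdiscs, hence the stub implementations below.
 */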
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

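/* Dump per-flow (pseudo class) statistics: deficit, CoDel state and the
 * current queue length and backlog of the flow.
 */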
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = 0;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

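/* Walk all active flows (pseudo classes), skipping empty ones. */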
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain)) {
			arg->count++;
			continue;
		}
		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		= fq_codel_leaf,
	.find		= fq_codel_find,
	.tcf_block	= fq_codel_tcf_block,
	.bind_tcf	= fq_codel_bind,
	.unbind_tcf	= fq_codel_unbind,
	.dump		= fq_codel_dump_class,
	.dump_stats	= fq_codel_dump_class_stats,
	.walk		= fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		= &fq_codel_class_ops,
	.id		= "fq_codel",
	.priv_size	= sizeof(struct fq_codel_sched_data),
	.enqueue	= fq_codel_enqueue,
	.dequeue	= fq_codel_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_codel_init,
	.reset		= fq_codel_reset,
	.destroy	= fq_codel_destroy,
	.change		= fq_codel_change,
	.dump		= fq_codel_dump,
	.dump_stats	= fq_codel_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");