1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
4 *
5 * Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
6 *
7 * Meant to be mostly used for locally generated traffic :
8 * Fast classification depends on skb->sk being set before reaching us.
 9 * If not (router workload), we use rxhash as a fallback, with a 32-bit wide hash.
10 * All packets belonging to a socket are considered as a 'flow'.
11 *
12 * Flows are dynamically allocated and stored in a hash table of RB trees
13 * They are also part of one Round Robin 'queues' (new or old flows)
14 *
15 * Burst avoidance (aka pacing) capability :
16 *
17 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
18 * bunch of packets, and this packet scheduler adds delay between
19 * packets to respect rate limitation.
20 *
21 * enqueue() :
22 * - lookup one RB tree (out of 1024 or more) to find the flow.
23 * If non existent flow, create it, add it to the tree.
24 * Add skb to the per flow list of skb (fifo).
25 * - Use a special fifo for high prio packets
26 *
27 * dequeue() : serves flows in Round Robin
28 * Note : When a flow becomes empty, we do not immediately remove it from
 29 * rb trees, for performance reasons (it's expected to send additional packets,
 30 * or the SLAB cache will reuse the socket for another flow)
31 */
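/*
 * Illustrative sketch (not part of the kernel source): the pacing gap
 * inserted between two packets of a flow follows the computation done in
 * fq_dequeue() below, roughly:
 *
 *	u64 gap_ns = div64_ul((u64)plen * NSEC_PER_SEC, rate);
 *
 * e.g. a 1500 byte packet on a flow paced at 1,250,000 bytes/sec becomes
 * eligible ~1.2 ms (1500 * 1e9 / 1.25e6 = 1,200,000 ns) after the
 * previous packet of that flow.
 */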
32
33#include <linux/module.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/jiffies.h>
37#include <linux/string.h>
38#include <linux/in.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/skbuff.h>
42#include <linux/slab.h>
43#include <linux/rbtree.h>
44#include <linux/hash.h>
45#include <linux/prefetch.h>
46#include <linux/vmalloc.h>
47#include <net/netlink.h>
48#include <net/pkt_sched.h>
49#include <net/sock.h>
50#include <net/tcp_states.h>
51#include <net/tcp.h>
52
53struct fq_skb_cb {
54 u64 time_to_send;
55 u8 band;
56};
57
58static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
59{
60 qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
61 return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
62}
63
64/*
65 * Per flow structure, dynamically allocated.
 66 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 67 * in a linear list (head, tail); otherwise they are placed in an rbtree (t_root).
68 */
69struct fq_flow {
70/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
71 struct rb_root t_root;
72 struct sk_buff *head; /* list of skbs for this flow : first skb */
73 union {
74 struct sk_buff *tail; /* last skb in the list */
75 unsigned long age; /* (jiffies | 1UL) when flow was emptied, for gc */
76 };
77 union {
78 struct rb_node fq_node; /* anchor in fq_root[] trees */
79 /* Following field is only used for q->internal,
80 * because q->internal is not hashed in fq_root[]
81 */
82 u64 stat_fastpath_packets;
83 };
84 struct sock *sk;
85 u32 socket_hash; /* sk_hash */
86 int qlen; /* number of packets in flow queue */
87
88/* Second cache line */
89 int credit;
90 int band;
91 struct fq_flow *next; /* next pointer in RR lists */
92
93 struct rb_node rate_node; /* anchor in q->delayed tree */
94 u64 time_next_packet;
95};
96
97struct fq_flow_head {
98 struct fq_flow *first;
99 struct fq_flow *last;
100};
101
102struct fq_perband_flows {
103 struct fq_flow_head new_flows;
104 struct fq_flow_head old_flows;
105 int credit;
106 int quantum; /* based on band nr : 576KB, 192KB, 64KB */
107};
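/*
 * Illustrative note (not from the original source): the default per-band
 * quantums set in fq_init() encode a 9:3:1 weighted round robin:
 *
 *	band 0: 9 << 16 = 589824 bytes (~576 KB)
 *	band 1: 3 << 16 = 196608 bytes ( 192 KB)
 *	band 2: 1 << 16 =  65536 bytes (  64 KB)
 *
 * pband->credit is refilled by pband->quantum in fq_dequeue() whenever a
 * band runs out of credit, so band 0 drains roughly 9x (and band 1 roughly
 * 3x) more bytes per scheduling round than band 2.
 */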
108
109#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)
110
111struct fq_sched_data {
112/* Read mostly cache line */
113
114 u64 offload_horizon;
115 u32 quantum;
116 u32 initial_quantum;
117 u32 flow_refill_delay;
118 u32 flow_plimit; /* max packets per flow */
119 unsigned long flow_max_rate; /* optional max rate per flow */
120 u64 ce_threshold;
121 u64 horizon; /* horizon in ns */
122 u32 orphan_mask; /* mask for orphaned skb */
123 u32 low_rate_threshold;
124 struct rb_root *fq_root;
125 u8 rate_enable;
126 u8 fq_trees_log;
127 u8 horizon_drop;
128 u8 prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
129 u32 timer_slack; /* hrtimer slack in ns */
130
131/* Read/Write fields. */
132
133 unsigned int band_nr; /* band being serviced in fq_dequeue() */
134
135 struct fq_perband_flows band_flows[FQ_BANDS];
136
137 struct fq_flow internal; /* fastpath queue. */
138 struct rb_root delayed; /* for rate limited flows */
139 u64 time_next_delayed_flow;
140 unsigned long unthrottle_latency_ns;
141
142 u32 band_pkt_count[FQ_BANDS];
143 u32 flows;
144 u32 inactive_flows; /* Flows with no packet to send. */
145 u32 throttled_flows;
146
147 u64 stat_throttled;
148 struct qdisc_watchdog watchdog;
149 u64 stat_gc_flows;
150
151/* Seldom used fields. */
152
153 u64 stat_band_drops[FQ_BANDS];
154 u64 stat_ce_mark;
155 u64 stat_horizon_drops;
156 u64 stat_horizon_caps;
157 u64 stat_flows_plimit;
158 u64 stat_pkts_too_long;
159 u64 stat_allocation_errors;
160};
161
162/* return the i-th 2-bit value ("crumb") */
163static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
164{
165 return (READ_ONCE(prio2band[prio / 4]) >> (2 * (prio & 0x3))) & 0x3;
166}
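/*
 * Worked example (illustration only, not part of the kernel source): with
 * four 2-bit crumbs packed per byte, the band for skb priority 6 lives in
 * prio2band[6 / 4] = prio2band[1], bits 4..5:
 *
 *	band = (prio2band[1] >> (2 * (6 & 0x3))) & 0x3;
 *
 * which is exactly the shift/mask performed by fq_prio2band() above.
 */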
167
168/*
169 * f->tail and f->age share the same location.
170 * We can use the low order bit to differentiate if this location points
171 * to a sk_buff or contains a jiffies value, if we force this value to be odd.
172 * This assumes f->tail low order bit must be 0 since alignof(struct sk_buff) >= 2
173 */
174static void fq_flow_set_detached(struct fq_flow *f)
175{
176 f->age = jiffies | 1UL;
177}
178
179static bool fq_flow_is_detached(const struct fq_flow *f)
180{
181 return !!(f->age & 1UL);
182}
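/*
 * Sketch of the tail/age union trick (illustration only): 'jiffies | 1UL'
 * is always odd, while a valid f->tail pointer is always even because
 * struct sk_buff is at least 2-byte aligned, so testing bit 0 is enough
 * to tell the two apart:
 *
 *	f->age = jiffies | 1UL;		// detached: odd value, not a pointer
 *	f->tail = skb;			// attached: even (aligned) pointer
 *	detached = f->age & 1UL;	// what fq_flow_is_detached() checks
 */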
183
184/* special value to mark a throttled flow (not on old/new list) */
185static struct fq_flow throttled;
186
187static bool fq_flow_is_throttled(const struct fq_flow *f)
188{
189 return f->next == &throttled;
190}
191
192enum new_flow {
193 NEW_FLOW,
194 OLD_FLOW
195};
196
197static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
198 enum new_flow list_sel)
199{
200 struct fq_perband_flows *pband = &q->band_flows[flow->band];
201 struct fq_flow_head *head = (list_sel == NEW_FLOW) ?
202 &pband->new_flows :
203 &pband->old_flows;
204
205 if (head->first)
206 head->last->next = flow;
207 else
208 head->first = flow;
209 head->last = flow;
210 flow->next = NULL;
211}
212
213static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
214{
215 rb_erase(&f->rate_node, &q->delayed);
216 q->throttled_flows--;
217 fq_flow_add_tail(q, f, OLD_FLOW);
218}
219
220static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
221{
222 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
223
224 while (*p) {
225 struct fq_flow *aux;
226
227 parent = *p;
228 aux = rb_entry(parent, struct fq_flow, rate_node);
229 if (f->time_next_packet >= aux->time_next_packet)
230 p = &parent->rb_right;
231 else
232 p = &parent->rb_left;
233 }
234 rb_link_node(&f->rate_node, parent, p);
235 rb_insert_color(&f->rate_node, &q->delayed);
236 q->throttled_flows++;
237 q->stat_throttled++;
238
239 f->next = &throttled;
240 if (q->time_next_delayed_flow > f->time_next_packet)
241 q->time_next_delayed_flow = f->time_next_packet;
242}
243
244
245static struct kmem_cache *fq_flow_cachep __read_mostly;
246
247
248/* limit number of collected flows per round */
249#define FQ_GC_MAX 8
250#define FQ_GC_AGE (3*HZ)
251
252static bool fq_gc_candidate(const struct fq_flow *f)
253{
254 return fq_flow_is_detached(f) &&
255 time_after(jiffies, f->age + FQ_GC_AGE);
256}
257
258static void fq_gc(struct fq_sched_data *q,
259 struct rb_root *root,
260 struct sock *sk)
261{
262 struct rb_node **p, *parent;
263 void *tofree[FQ_GC_MAX];
264 struct fq_flow *f;
265 int i, fcnt = 0;
266
267 p = &root->rb_node;
268 parent = NULL;
269 while (*p) {
270 parent = *p;
271
272 f = rb_entry(parent, struct fq_flow, fq_node);
273 if (f->sk == sk)
274 break;
275
276 if (fq_gc_candidate(f)) {
277 tofree[fcnt++] = f;
278 if (fcnt == FQ_GC_MAX)
279 break;
280 }
281
282 if (f->sk > sk)
283 p = &parent->rb_right;
284 else
285 p = &parent->rb_left;
286 }
287
288 if (!fcnt)
289 return;
290
291 for (i = fcnt; i > 0; ) {
292 f = tofree[--i];
293 rb_erase(&f->fq_node, root);
294 }
295 q->flows -= fcnt;
296 q->inactive_flows -= fcnt;
297 q->stat_gc_flows += fcnt;
298
299 kmem_cache_free_bulk(fq_flow_cachep, fcnt, tofree);
300}
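/*
 * Side note (illustration, not from the source): a flow becomes a GC
 * candidate once it has been detached for more than FQ_GC_AGE = 3 * HZ
 * jiffies, i.e. about 3 seconds regardless of CONFIG_HZ, and at most
 * FQ_GC_MAX (8) such flows are reclaimed per tree lookup:
 *
 *	candidate = (f->age & 1UL) && time_after(jiffies, f->age + 3 * HZ);
 */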
301
302/* Fast path can be used if :
303 * 1) Packet tstamp is in the past, or within the pacing offload horizon.
304 * 2) FQ qlen == 0 OR
305 * (no flow is currently eligible for transmit,
306 * AND fast path queue has less than 8 packets)
307 * 3) No SO_MAX_PACING_RATE on the socket (if any).
 308 * 4) No @maxrate attribute on this qdisc.
 309 *
 310 * FQ cannot use the generic TCQ_F_CAN_BYPASS infrastructure.
311 */
312static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
313 u64 now)
314{
315 const struct fq_sched_data *q = qdisc_priv(sch);
316 const struct sock *sk;
317
318 if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
319 return false;
320
321 if (sch->q.qlen != 0) {
322 /* Even if some packets are stored in this qdisc,
323 * we can still enable fast path if all of them are
324 * scheduled in the future (ie no flows are eligible)
325 * or in the fast path queue.
326 */
327 if (q->flows != q->inactive_flows + q->throttled_flows)
328 return false;
329
 330 /* Do not allow the fast path queue to explode; we want Fair Queue mode
 331 * under pressure.
332 */
333 if (q->internal.qlen >= 8)
334 return false;
335
336 /* Ordering invariants fall apart if some delayed flows
 337 * are ready but we haven't serviced them yet.
338 */
339 if (q->time_next_delayed_flow <= now + q->offload_horizon)
340 return false;
341 }
342
343 sk = skb->sk;
344 if (sk && sk_fullsock(sk) && !sk_is_tcp(sk) &&
345 sk->sk_max_pacing_rate != ~0UL)
346 return false;
347
348 if (q->flow_max_rate != ~0UL)
349 return false;
350
351 return true;
352}
353
354static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
355 u64 now)
356{
357 struct fq_sched_data *q = qdisc_priv(sch);
358 struct rb_node **p, *parent;
359 struct sock *sk = skb->sk;
360 struct rb_root *root;
361 struct fq_flow *f;
362
363 /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
364 * or a listener (SYNCOOKIE mode)
365 * 1) request sockets are not full blown,
366 * they do not contain sk_pacing_rate
367 * 2) They are not part of a 'flow' yet
368 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
369 * especially if the listener set SO_MAX_PACING_RATE
370 * 4) We pretend they are orphaned
371 * TCP can also associate TIME_WAIT sockets with RST or ACK packets.
372 */
373 if (!sk || sk_listener_or_tw(sk)) {
374 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
375
376 /* By forcing low order bit to 1, we make sure to not
377 * collide with a local flow (socket pointers are word aligned)
378 */
379 sk = (struct sock *)((hash << 1) | 1UL);
380 skb_orphan(skb);
381 } else if (sk->sk_state == TCP_CLOSE) {
382 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
383 /*
 384 * Sockets in TCP_CLOSE are not connected.
385 * Typical use case is UDP sockets, they can send packets
386 * with sendto() to many different destinations.
387 * We probably could use a generic bit advertising
388 * non connected sockets, instead of sk_state == TCP_CLOSE,
389 * if we care enough.
390 */
391 sk = (struct sock *)((hash << 1) | 1UL);
392 }
393
394 if (fq_fastpath_check(sch, skb, now)) {
395 q->internal.stat_fastpath_packets++;
396 if (skb->sk == sk && q->rate_enable &&
397 READ_ONCE(sk->sk_pacing_status) != SK_PACING_FQ)
398 smp_store_release(&sk->sk_pacing_status,
399 SK_PACING_FQ);
400 return &q->internal;
401 }
402
403 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
404
405 fq_gc(q, root, sk);
406
407 p = &root->rb_node;
408 parent = NULL;
409 while (*p) {
410 parent = *p;
411
412 f = rb_entry(parent, struct fq_flow, fq_node);
413 if (f->sk == sk) {
414 /* socket might have been reallocated, so check
415 * if its sk_hash is the same.
 416 * If not, we need to refill the credit with the
 417 * initial quantum
418 */
419 if (unlikely(skb->sk == sk &&
420 f->socket_hash != sk->sk_hash)) {
421 f->credit = q->initial_quantum;
422 f->socket_hash = sk->sk_hash;
423 if (q->rate_enable)
424 smp_store_release(&sk->sk_pacing_status,
425 SK_PACING_FQ);
426 if (fq_flow_is_throttled(f))
427 fq_flow_unset_throttled(q, f);
428 f->time_next_packet = 0ULL;
429 }
430 return f;
431 }
432 if (f->sk > sk)
433 p = &parent->rb_right;
434 else
435 p = &parent->rb_left;
436 }
437
438 f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
439 if (unlikely(!f)) {
440 q->stat_allocation_errors++;
441 return &q->internal;
442 }
443 /* f->t_root is already zeroed after kmem_cache_zalloc() */
444
445 fq_flow_set_detached(f);
446 f->sk = sk;
447 if (skb->sk == sk) {
448 f->socket_hash = sk->sk_hash;
449 if (q->rate_enable)
450 smp_store_release(&sk->sk_pacing_status,
451 SK_PACING_FQ);
452 }
453 f->credit = q->initial_quantum;
454
455 rb_link_node(&f->fq_node, parent, p);
456 rb_insert_color(&f->fq_node, root);
457
458 q->flows++;
459 q->inactive_flows++;
460 return f;
461}
462
463static struct sk_buff *fq_peek(struct fq_flow *flow)
464{
465 struct sk_buff *skb = skb_rb_first(&flow->t_root);
466 struct sk_buff *head = flow->head;
467
468 if (!skb)
469 return head;
470
471 if (!head)
472 return skb;
473
474 if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
475 return skb;
476 return head;
477}
478
479static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
480 struct sk_buff *skb)
481{
482 if (skb == flow->head) {
483 flow->head = skb->next;
484 } else {
485 rb_erase(&skb->rbnode, &flow->t_root);
486 skb->dev = qdisc_dev(sch);
487 }
488}
489
490/* Remove one skb from flow queue.
491 * This skb must be the return value of prior fq_peek().
492 */
493static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
494 struct sk_buff *skb)
495{
496 fq_erase_head(sch, flow, skb);
497 skb_mark_not_on_list(skb);
498 qdisc_qstats_backlog_dec(sch, skb);
499 sch->q.qlen--;
500}
501
502static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
503{
504 struct rb_node **p, *parent;
505 struct sk_buff *head, *aux;
506
507 head = flow->head;
508 if (!head ||
509 fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
510 if (!head)
511 flow->head = skb;
512 else
513 flow->tail->next = skb;
514 flow->tail = skb;
515 skb->next = NULL;
516 return;
517 }
518
519 p = &flow->t_root.rb_node;
520 parent = NULL;
521
522 while (*p) {
523 parent = *p;
524 aux = rb_to_skb(parent);
525 if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
526 p = &parent->rb_right;
527 else
528 p = &parent->rb_left;
529 }
530 rb_link_node(&skb->rbnode, parent, p);
531 rb_insert_color(&skb->rbnode, &flow->t_root);
532}
533
534static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
535 const struct fq_sched_data *q, u64 now)
536{
537 return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
538}
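/*
 * Illustration (not part of the source): with the default q->horizon of
 * 10 seconds set in fq_init(), an skb carrying a tstamp more than 10s in
 * the future is either dropped (horizon_drop, the default) or has its
 * tstamp capped by fq_enqueue():
 *
 *	skb->tstamp = now + q->horizon;	// only when horizon_drop == 0
 */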
539
540static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
541 struct sk_buff **to_free)
542{
543 struct fq_sched_data *q = qdisc_priv(sch);
544 struct fq_flow *f;
545 u64 now;
546 u8 band;
547
548 band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
549 if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
550 q->stat_band_drops[band]++;
551 return qdisc_drop(skb, sch, to_free);
552 }
553
554 now = ktime_get_ns();
555 if (!skb->tstamp) {
556 fq_skb_cb(skb)->time_to_send = now;
557 } else {
558 /* Check if packet timestamp is too far in the future. */
559 if (fq_packet_beyond_horizon(skb, q, now)) {
560 if (q->horizon_drop) {
561 q->stat_horizon_drops++;
562 return qdisc_drop(skb, sch, to_free);
563 }
564 q->stat_horizon_caps++;
565 skb->tstamp = now + q->horizon;
566 }
567 fq_skb_cb(skb)->time_to_send = skb->tstamp;
568 }
569
570 f = fq_classify(sch, skb, now);
571
572 if (f != &q->internal) {
573 if (unlikely(f->qlen >= q->flow_plimit)) {
574 q->stat_flows_plimit++;
575 return qdisc_drop(skb, sch, to_free);
576 }
577
578 if (fq_flow_is_detached(f)) {
579 fq_flow_add_tail(q, f, NEW_FLOW);
580 if (time_after(jiffies, f->age + q->flow_refill_delay))
581 f->credit = max_t(u32, f->credit, q->quantum);
582 }
583
584 f->band = band;
585 q->band_pkt_count[band]++;
586 fq_skb_cb(skb)->band = band;
587 if (f->qlen == 0)
588 q->inactive_flows--;
589 }
590
591 f->qlen++;
592 /* Note: this overwrites f->age */
593 flow_queue_add(f, skb);
594
595 qdisc_qstats_backlog_inc(sch, skb);
596 sch->q.qlen++;
597
598 return NET_XMIT_SUCCESS;
599}
600
601static void fq_check_throttled(struct fq_sched_data *q, u64 now)
602{
603 unsigned long sample;
604 struct rb_node *p;
605
606 if (q->time_next_delayed_flow > now + q->offload_horizon)
607 return;
608
609 /* Update unthrottle latency EWMA.
610 * This is cheap and can help diagnosing timer/latency problems.
611 */
612 sample = (unsigned long)(now - q->time_next_delayed_flow);
613 if ((long)sample > 0) {
614 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
615 q->unthrottle_latency_ns += sample >> 3;
616 }
617 now += q->offload_horizon;
618
619 q->time_next_delayed_flow = ~0ULL;
620 while ((p = rb_first(&q->delayed)) != NULL) {
621 struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
622
623 if (f->time_next_packet > now) {
624 q->time_next_delayed_flow = f->time_next_packet;
625 break;
626 }
627 fq_flow_unset_throttled(q, f);
628 }
629}
630
631static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
632{
633 if (pband->credit <= 0)
634 return NULL;
635
636 if (pband->new_flows.first)
637 return &pband->new_flows;
638
639 return pband->old_flows.first ? &pband->old_flows : NULL;
640}
641
642static struct sk_buff *fq_dequeue(struct Qdisc *sch)
643{
644 struct fq_sched_data *q = qdisc_priv(sch);
645 struct fq_perband_flows *pband;
646 struct fq_flow_head *head;
647 struct sk_buff *skb;
648 struct fq_flow *f;
649 unsigned long rate;
650 int retry;
651 u32 plen;
652 u64 now;
653
654 if (!sch->q.qlen)
655 return NULL;
656
657 skb = fq_peek(&q->internal);
658 if (unlikely(skb)) {
659 q->internal.qlen--;
660 fq_dequeue_skb(sch, &q->internal, skb);
661 goto out;
662 }
663
664 now = ktime_get_ns();
665 fq_check_throttled(q, now);
666 retry = 0;
667 pband = &q->band_flows[q->band_nr];
668begin:
669 head = fq_pband_head_select(pband);
670 if (!head) {
671 while (++retry <= FQ_BANDS) {
672 if (++q->band_nr == FQ_BANDS)
673 q->band_nr = 0;
674 pband = &q->band_flows[q->band_nr];
675 pband->credit = min(pband->credit + pband->quantum,
676 pband->quantum);
677 if (pband->credit > 0)
678 goto begin;
679 retry = 0;
680 }
681 if (q->time_next_delayed_flow != ~0ULL)
682 qdisc_watchdog_schedule_range_ns(&q->watchdog,
683 q->time_next_delayed_flow,
684 q->timer_slack);
685 return NULL;
686 }
687 f = head->first;
688 retry = 0;
689 if (f->credit <= 0) {
690 f->credit += q->quantum;
691 head->first = f->next;
692 fq_flow_add_tail(q, f, OLD_FLOW);
693 goto begin;
694 }
695
696 skb = fq_peek(f);
697 if (skb) {
698 u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
699 f->time_next_packet);
700
701 if (now + q->offload_horizon < time_next_packet) {
702 head->first = f->next;
703 f->time_next_packet = time_next_packet;
704 fq_flow_set_throttled(q, f);
705 goto begin;
706 }
707 prefetch(&skb->end);
708 if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
709 INET_ECN_set_ce(skb);
710 q->stat_ce_mark++;
711 }
712 if (--f->qlen == 0)
713 q->inactive_flows++;
714 q->band_pkt_count[fq_skb_cb(skb)->band]--;
715 fq_dequeue_skb(sch, f, skb);
716 } else {
717 head->first = f->next;
718 /* force a pass through old_flows to prevent starvation */
719 if (head == &pband->new_flows) {
720 fq_flow_add_tail(q, f, OLD_FLOW);
721 } else {
722 fq_flow_set_detached(f);
723 }
724 goto begin;
725 }
726 plen = qdisc_pkt_len(skb);
727 f->credit -= plen;
728 pband->credit -= plen;
729
730 if (!q->rate_enable)
731 goto out;
732
733 rate = q->flow_max_rate;
734
735 /* If EDT time was provided for this skb, we need to
736 * update f->time_next_packet only if this qdisc enforces
737 * a flow max rate.
738 */
739 if (!skb->tstamp) {
740 if (skb->sk)
741 rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);
742
743 if (rate <= q->low_rate_threshold) {
744 f->credit = 0;
745 } else {
746 plen = max(plen, q->quantum);
747 if (f->credit > 0)
748 goto out;
749 }
750 }
751 if (rate != ~0UL) {
752 u64 len = (u64)plen * NSEC_PER_SEC;
753
754 if (likely(rate))
755 len = div64_ul(len, rate);
756 /* Since socket rate can change later,
757 * clamp the delay to 1 second.
758 * Really, providers of too big packets should be fixed !
759 */
760 if (unlikely(len > NSEC_PER_SEC)) {
761 len = NSEC_PER_SEC;
762 q->stat_pkts_too_long++;
763 }
764 /* Account for schedule/timers drifts.
765 * f->time_next_packet was set when prior packet was sent,
766 * and current time (@now) can be too late by tens of us.
767 */
768 if (f->time_next_packet)
769 len -= min(len/2, now - f->time_next_packet);
770 f->time_next_packet = now + len;
771 }
772out:
773 qdisc_bstats_update(sch, skb);
774 return skb;
775}
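/*
 * Numeric sketch of the pacing computation above (illustration only, the
 * values are made up): for plen = 15140 bytes and rate = 12,500,000
 * bytes/sec,
 *
 *	len = 15140ULL * NSEC_PER_SEC / 12500000 = 1211200 ns (~1.2 ms)
 *
 * and when f->time_next_packet was already set, the drift since that time
 * is given back, capped at half the new delay:
 *
 *	len -= min(len / 2, now - f->time_next_packet);
 *	f->time_next_packet = now + len;
 */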
776
777static void fq_flow_purge(struct fq_flow *flow)
778{
779 struct rb_node *p = rb_first(&flow->t_root);
780
781 while (p) {
782 struct sk_buff *skb = rb_to_skb(p);
783
784 p = rb_next(p);
785 rb_erase(&skb->rbnode, &flow->t_root);
786 rtnl_kfree_skbs(skb, skb);
787 }
788 rtnl_kfree_skbs(flow->head, flow->tail);
789 flow->head = NULL;
790 flow->qlen = 0;
791}
792
793static void fq_reset(struct Qdisc *sch)
794{
795 struct fq_sched_data *q = qdisc_priv(sch);
796 struct rb_root *root;
797 struct rb_node *p;
798 struct fq_flow *f;
799 unsigned int idx;
800
801 sch->q.qlen = 0;
802 sch->qstats.backlog = 0;
803
804 fq_flow_purge(&q->internal);
805
806 if (!q->fq_root)
807 return;
808
809 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
810 root = &q->fq_root[idx];
811 while ((p = rb_first(root)) != NULL) {
812 f = rb_entry(p, struct fq_flow, fq_node);
813 rb_erase(p, root);
814
815 fq_flow_purge(f);
816
817 kmem_cache_free(fq_flow_cachep, f);
818 }
819 }
820 for (idx = 0; idx < FQ_BANDS; idx++) {
821 q->band_flows[idx].new_flows.first = NULL;
822 q->band_flows[idx].old_flows.first = NULL;
823 }
824 q->delayed = RB_ROOT;
825 q->flows = 0;
826 q->inactive_flows = 0;
827 q->throttled_flows = 0;
828}
829
830static void fq_rehash(struct fq_sched_data *q,
831 struct rb_root *old_array, u32 old_log,
832 struct rb_root *new_array, u32 new_log)
833{
834 struct rb_node *op, **np, *parent;
835 struct rb_root *oroot, *nroot;
836 struct fq_flow *of, *nf;
837 int fcnt = 0;
838 u32 idx;
839
840 for (idx = 0; idx < (1U << old_log); idx++) {
841 oroot = &old_array[idx];
842 while ((op = rb_first(oroot)) != NULL) {
843 rb_erase(op, oroot);
844 of = rb_entry(op, struct fq_flow, fq_node);
845 if (fq_gc_candidate(of)) {
846 fcnt++;
847 kmem_cache_free(fq_flow_cachep, of);
848 continue;
849 }
850 nroot = &new_array[hash_ptr(of->sk, new_log)];
851
852 np = &nroot->rb_node;
853 parent = NULL;
854 while (*np) {
855 parent = *np;
856
857 nf = rb_entry(parent, struct fq_flow, fq_node);
858 BUG_ON(nf->sk == of->sk);
859
860 if (nf->sk > of->sk)
861 np = &parent->rb_right;
862 else
863 np = &parent->rb_left;
864 }
865
866 rb_link_node(&of->fq_node, parent, np);
867 rb_insert_color(&of->fq_node, nroot);
868 }
869 }
870 q->flows -= fcnt;
871 q->inactive_flows -= fcnt;
872 q->stat_gc_flows += fcnt;
873}
874
875static void fq_free(void *addr)
876{
877 kvfree(addr);
878}
879
880static int fq_resize(struct Qdisc *sch, u32 log)
881{
882 struct fq_sched_data *q = qdisc_priv(sch);
883 struct rb_root *array;
884 void *old_fq_root;
885 u32 idx;
886
887 if (q->fq_root && log == q->fq_trees_log)
888 return 0;
889
 890 /* If XPS was set up, we can allocate memory on the right NUMA node */
891 array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
892 netdev_queue_numa_node_read(sch->dev_queue));
893 if (!array)
894 return -ENOMEM;
895
896 for (idx = 0; idx < (1U << log); idx++)
897 array[idx] = RB_ROOT;
898
899 sch_tree_lock(sch);
900
901 old_fq_root = q->fq_root;
902 if (old_fq_root)
903 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
904
905 q->fq_root = array;
906 WRITE_ONCE(q->fq_trees_log, log);
907
908 sch_tree_unlock(sch);
909
910 fq_free(old_fq_root);
911
912 return 0;
913}
914
915static const struct netlink_range_validation iq_range = {
916 .max = INT_MAX,
917};
918
919static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
920 [TCA_FQ_UNSPEC] = { .strict_start_type = TCA_FQ_TIMER_SLACK },
921
922 [TCA_FQ_PLIMIT] = { .type = NLA_U32 },
923 [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
924 [TCA_FQ_QUANTUM] = { .type = NLA_U32 },
925 [TCA_FQ_INITIAL_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
926 [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
927 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
928 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
929 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
930 [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
931 [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
932 [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
933 [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
934 [TCA_FQ_TIMER_SLACK] = { .type = NLA_U32 },
935 [TCA_FQ_HORIZON] = { .type = NLA_U32 },
936 [TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
937 [TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
938 [TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
939 [TCA_FQ_OFFLOAD_HORIZON] = { .type = NLA_U32 },
940};
941
942/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
943static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
944{
945 const int num_elems = TC_PRIO_MAX + 1;
946 u8 tmp[FQ_PRIO2BAND_CRUMB_SIZE];
947 int i;
948
949 memset(tmp, 0, sizeof(tmp));
950 for (i = 0; i < num_elems; i++)
951 tmp[i / 4] |= in[i] << (2 * (i & 0x3));
952
953 for (i = 0; i < FQ_PRIO2BAND_CRUMB_SIZE; i++)
954 WRITE_ONCE(out[i], tmp[i]);
955}
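/*
 * Illustrative example (not part of the kernel source): compressing the
 * first four entries of a priomap such as {1, 2, 2, 2, ...} yields
 *
 *	tmp[0] = 1 << 0 | 2 << 2 | 2 << 4 | 2 << 6 = 0xa9
 *
 * i.e. the sixteen u8 band values fit in the four bytes of q->prio2band[],
 * which fq_prio2band() above reads back crumb by crumb.
 */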
956
957static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
958{
959 const int num_elems = TC_PRIO_MAX + 1;
960 int i;
961
962 for (i = 0; i < num_elems; i++)
963 out[i] = fq_prio2band(in, i);
964}
965
966static int fq_load_weights(struct fq_sched_data *q,
967 const struct nlattr *attr,
968 struct netlink_ext_ack *extack)
969{
970 s32 *weights = nla_data(attr);
971 int i;
972
973 for (i = 0; i < FQ_BANDS; i++) {
974 if (weights[i] < FQ_MIN_WEIGHT) {
 975 NL_SET_ERR_MSG_FMT_MOD(extack, "Weight %d less than minimum allowed %d",
976 weights[i], FQ_MIN_WEIGHT);
977 return -EINVAL;
978 }
979 }
980 for (i = 0; i < FQ_BANDS; i++)
981 WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
982 return 0;
983}
984
985static int fq_load_priomap(struct fq_sched_data *q,
986 const struct nlattr *attr,
987 struct netlink_ext_ack *extack)
988{
989 const struct tc_prio_qopt *map = nla_data(attr);
990 int i;
991
992 if (map->bands != FQ_BANDS) {
993 NL_SET_ERR_MSG_MOD(extack, "FQ only supports 3 bands");
994 return -EINVAL;
995 }
996 for (i = 0; i < TC_PRIO_MAX + 1; i++) {
997 if (map->priomap[i] >= FQ_BANDS) {
998 NL_SET_ERR_MSG_FMT_MOD(extack, "FQ priomap field %d maps to a too high band %d",
999 i, map->priomap[i]);
1000 return -EINVAL;
1001 }
1002 }
1003 fq_prio2band_compress_crumb(map->priomap, q->prio2band);
1004 return 0;
1005}
1006
1007static int fq_change(struct Qdisc *sch, struct nlattr *opt,
1008 struct netlink_ext_ack *extack)
1009{
1010 struct fq_sched_data *q = qdisc_priv(sch);
1011 struct nlattr *tb[TCA_FQ_MAX + 1];
1012 int err, drop_count = 0;
1013 unsigned drop_len = 0;
1014 u32 fq_log;
1015
1016 err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
1017 NULL);
1018 if (err < 0)
1019 return err;
1020
1021 sch_tree_lock(sch);
1022
1023 fq_log = q->fq_trees_log;
1024
1025 if (tb[TCA_FQ_BUCKETS_LOG]) {
1026 u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
1027
1028 if (nval >= 1 && nval <= ilog2(256*1024))
1029 fq_log = nval;
1030 else
1031 err = -EINVAL;
1032 }
1033 if (tb[TCA_FQ_PLIMIT])
1034 WRITE_ONCE(sch->limit,
1035 nla_get_u32(tb[TCA_FQ_PLIMIT]));
1036
1037 if (tb[TCA_FQ_FLOW_PLIMIT])
1038 WRITE_ONCE(q->flow_plimit,
1039 nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));
1040
1041 if (tb[TCA_FQ_QUANTUM]) {
1042 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
1043
1044 if (quantum > 0 && quantum <= (1 << 20)) {
1045 WRITE_ONCE(q->quantum, quantum);
1046 } else {
1047 NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
1048 err = -EINVAL;
1049 }
1050 }
1051
1052 if (tb[TCA_FQ_INITIAL_QUANTUM])
1053 WRITE_ONCE(q->initial_quantum,
1054 nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]));
1055
1056 if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
1057 pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
1058 nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
1059
1060 if (tb[TCA_FQ_FLOW_MAX_RATE]) {
1061 u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
1062
1063 WRITE_ONCE(q->flow_max_rate,
1064 (rate == ~0U) ? ~0UL : rate);
1065 }
1066 if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
1067 WRITE_ONCE(q->low_rate_threshold,
1068 nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]));
1069
1070 if (tb[TCA_FQ_RATE_ENABLE]) {
1071 u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
1072
1073 if (enable <= 1)
1074 WRITE_ONCE(q->rate_enable,
1075 enable);
1076 else
1077 err = -EINVAL;
1078 }
1079
1080 if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
1081 u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;
1082
1083 WRITE_ONCE(q->flow_refill_delay,
1084 usecs_to_jiffies(usecs_delay));
1085 }
1086
1087 if (!err && tb[TCA_FQ_PRIOMAP])
1088 err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);
1089
1090 if (!err && tb[TCA_FQ_WEIGHTS])
1091 err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
1092
1093 if (tb[TCA_FQ_ORPHAN_MASK])
1094 WRITE_ONCE(q->orphan_mask,
1095 nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]));
1096
1097 if (tb[TCA_FQ_CE_THRESHOLD])
1098 WRITE_ONCE(q->ce_threshold,
1099 (u64)NSEC_PER_USEC *
1100 nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]));
1101
1102 if (tb[TCA_FQ_TIMER_SLACK])
1103 WRITE_ONCE(q->timer_slack,
1104 nla_get_u32(tb[TCA_FQ_TIMER_SLACK]));
1105
1106 if (tb[TCA_FQ_HORIZON])
1107 WRITE_ONCE(q->horizon,
1108 (u64)NSEC_PER_USEC *
1109 nla_get_u32(tb[TCA_FQ_HORIZON]));
1110
1111 if (tb[TCA_FQ_HORIZON_DROP])
1112 WRITE_ONCE(q->horizon_drop,
1113 nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));
1114
1115 if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
1116 u64 offload_horizon = (u64)NSEC_PER_USEC *
1117 nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);
1118
1119 if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
1120 WRITE_ONCE(q->offload_horizon, offload_horizon);
1121 } else {
1122 NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
1123 err = -EINVAL;
1124 }
1125 }
1126 if (!err) {
1127
1128 sch_tree_unlock(sch);
1129 err = fq_resize(sch, fq_log);
1130 sch_tree_lock(sch);
1131 }
1132 while (sch->q.qlen > sch->limit) {
1133 struct sk_buff *skb = fq_dequeue(sch);
1134
1135 if (!skb)
1136 break;
1137 drop_len += qdisc_pkt_len(skb);
1138 rtnl_kfree_skbs(skb, skb);
1139 drop_count++;
1140 }
1141 qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
1142
1143 sch_tree_unlock(sch);
1144 return err;
1145}
1146
1147static void fq_destroy(struct Qdisc *sch)
1148{
1149 struct fq_sched_data *q = qdisc_priv(sch);
1150
1151 fq_reset(sch);
1152 fq_free(q->fq_root);
1153 qdisc_watchdog_cancel(&q->watchdog);
1154}
1155
1156static int fq_init(struct Qdisc *sch, struct nlattr *opt,
1157 struct netlink_ext_ack *extack)
1158{
1159 struct fq_sched_data *q = qdisc_priv(sch);
1160 int i, err;
1161
1162 sch->limit = 10000;
1163 q->flow_plimit = 100;
1164 q->quantum = 2 * psched_mtu(qdisc_dev(sch));
1165 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
1166 q->flow_refill_delay = msecs_to_jiffies(40);
1167 q->flow_max_rate = ~0UL;
1168 q->time_next_delayed_flow = ~0ULL;
1169 q->rate_enable = 1;
1170 for (i = 0; i < FQ_BANDS; i++) {
1171 q->band_flows[i].new_flows.first = NULL;
1172 q->band_flows[i].old_flows.first = NULL;
1173 }
1174 q->band_flows[0].quantum = 9 << 16;
1175 q->band_flows[1].quantum = 3 << 16;
1176 q->band_flows[2].quantum = 1 << 16;
1177 q->delayed = RB_ROOT;
1178 q->fq_root = NULL;
1179 q->fq_trees_log = ilog2(1024);
1180 q->orphan_mask = 1024 - 1;
1181 q->low_rate_threshold = 550000 / 8;
1182
1183 q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */
1184
1185 q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
1186 q->horizon_drop = 1; /* by default, drop packets beyond horizon */
1187
1188 /* Default ce_threshold of 4294 seconds */
1189 q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
1190
1191 fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
1192 qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
1193
1194 if (opt)
1195 err = fq_change(sch, opt, extack);
1196 else
1197 err = fq_resize(sch, q->fq_trees_log);
1198
1199 return err;
1200}
1201
1202static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
1203{
1204 struct fq_sched_data *q = qdisc_priv(sch);
1205 struct tc_prio_qopt prio = {
1206 .bands = FQ_BANDS,
1207 };
1208 struct nlattr *opts;
1209 u64 offload_horizon;
1210 u64 ce_threshold;
1211 s32 weights[3];
1212 u64 horizon;
1213
1214 opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
1215 if (opts == NULL)
1216 goto nla_put_failure;
1217
1218 /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
1219
1220 ce_threshold = READ_ONCE(q->ce_threshold);
1221 do_div(ce_threshold, NSEC_PER_USEC);
1222
1223 horizon = READ_ONCE(q->horizon);
1224 do_div(horizon, NSEC_PER_USEC);
1225
1226 offload_horizon = READ_ONCE(q->offload_horizon);
1227 do_div(offload_horizon, NSEC_PER_USEC);
1228
1229 if (nla_put_u32(skb, TCA_FQ_PLIMIT,
1230 READ_ONCE(sch->limit)) ||
1231 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
1232 READ_ONCE(q->flow_plimit)) ||
1233 nla_put_u32(skb, TCA_FQ_QUANTUM,
1234 READ_ONCE(q->quantum)) ||
1235 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM,
1236 READ_ONCE(q->initial_quantum)) ||
1237 nla_put_u32(skb, TCA_FQ_RATE_ENABLE,
1238 READ_ONCE(q->rate_enable)) ||
1239 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
1240 min_t(unsigned long,
1241 READ_ONCE(q->flow_max_rate), ~0U)) ||
1242 nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
1243 jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
1244 nla_put_u32(skb, TCA_FQ_ORPHAN_MASK,
1245 READ_ONCE(q->orphan_mask)) ||
1246 nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
1247 READ_ONCE(q->low_rate_threshold)) ||
1248 nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
1249 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG,
1250 READ_ONCE(q->fq_trees_log)) ||
1251 nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
1252 READ_ONCE(q->timer_slack)) ||
1253 nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
1254 nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
1255 nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
1256 READ_ONCE(q->horizon_drop)))
1257 goto nla_put_failure;
1258
1259 fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
1260 if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
1261 goto nla_put_failure;
1262
1263 weights[0] = READ_ONCE(q->band_flows[0].quantum);
1264 weights[1] = READ_ONCE(q->band_flows[1].quantum);
1265 weights[2] = READ_ONCE(q->band_flows[2].quantum);
1266 if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
1267 goto nla_put_failure;
1268
1269 return nla_nest_end(skb, opts);
1270
1271nla_put_failure:
1272 return -1;
1273}
1274
1275static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1276{
1277 struct fq_sched_data *q = qdisc_priv(sch);
1278 struct tc_fq_qd_stats st;
1279 int i;
1280
1281 st.pad = 0;
1282
1283 sch_tree_lock(sch);
1284
1285 st.gc_flows = q->stat_gc_flows;
1286 st.highprio_packets = 0;
1287 st.fastpath_packets = q->internal.stat_fastpath_packets;
1288 st.tcp_retrans = 0;
1289 st.throttled = q->stat_throttled;
1290 st.flows_plimit = q->stat_flows_plimit;
1291 st.pkts_too_long = q->stat_pkts_too_long;
1292 st.allocation_errors = q->stat_allocation_errors;
1293 st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
1294 ktime_get_ns();
1295 st.flows = q->flows;
1296 st.inactive_flows = q->inactive_flows;
1297 st.throttled_flows = q->throttled_flows;
1298 st.unthrottle_latency_ns = min_t(unsigned long,
1299 q->unthrottle_latency_ns, ~0U);
1300 st.ce_mark = q->stat_ce_mark;
1301 st.horizon_drops = q->stat_horizon_drops;
1302 st.horizon_caps = q->stat_horizon_caps;
1303 for (i = 0; i < FQ_BANDS; i++) {
1304 st.band_drops[i] = q->stat_band_drops[i];
1305 st.band_pkt_count[i] = q->band_pkt_count[i];
1306 }
1307 sch_tree_unlock(sch);
1308
1309 return gnet_stats_copy_app(d, &st, sizeof(st));
1310}
1311
1312static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
1313 .id = "fq",
1314 .priv_size = sizeof(struct fq_sched_data),
1315
1316 .enqueue = fq_enqueue,
1317 .dequeue = fq_dequeue,
1318 .peek = qdisc_peek_dequeued,
1319 .init = fq_init,
1320 .reset = fq_reset,
1321 .destroy = fq_destroy,
1322 .change = fq_change,
1323 .dump = fq_dump,
1324 .dump_stats = fq_dump_stats,
1325 .owner = THIS_MODULE,
1326};
1327MODULE_ALIAS_NET_SCH("fq");
1328
1329static int __init fq_module_init(void)
1330{
1331 int ret;
1332
1333 fq_flow_cachep = kmem_cache_create("fq_flow_cache",
1334 sizeof(struct fq_flow),
1335 0, SLAB_HWCACHE_ALIGN, NULL);
1336 if (!fq_flow_cachep)
1337 return -ENOMEM;
1338
1339 ret = register_qdisc(&fq_qdisc_ops);
1340 if (ret)
1341 kmem_cache_destroy(fq_flow_cachep);
1342 return ret;
1343}
1344
1345static void __exit fq_module_exit(void)
1346{
1347 unregister_qdisc(&fq_qdisc_ops);
1348 kmem_cache_destroy(fq_flow_cachep);
1349}
1350
1351module_init(fq_module_init)
1352module_exit(fq_module_exit)
1353MODULE_AUTHOR("Eric Dumazet");
1354MODULE_LICENSE("GPL");
1355MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
4 *
5 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
6 *
7 * Meant to be mostly used for locally generated traffic :
8 * Fast classification depends on skb->sk being set before reaching us.
9 * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
10 * All packets belonging to a socket are considered as a 'flow'.
11 *
12 * Flows are dynamically allocated and stored in a hash table of RB trees
13 * They are also part of one Round Robin 'queues' (new or old flows)
14 *
15 * Burst avoidance (aka pacing) capability :
16 *
17 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
18 * bunch of packets, and this packet scheduler adds delay between
19 * packets to respect rate limitation.
20 *
21 * enqueue() :
22 * - lookup one RB tree (out of 1024 or more) to find the flow.
23 * If non existent flow, create it, add it to the tree.
24 * Add skb to the per flow list of skb (fifo).
25 * - Use a special fifo for high prio packets
26 *
27 * dequeue() : serves flows in Round Robin
28 * Note : When a flow becomes empty, we do not immediately remove it from
29 * rb trees, for performance reasons (its expected to send additional packets,
30 * or SLAB cache will reuse socket for another flow)
31 */
32
33#include <linux/module.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/jiffies.h>
37#include <linux/string.h>
38#include <linux/in.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/skbuff.h>
42#include <linux/slab.h>
43#include <linux/rbtree.h>
44#include <linux/hash.h>
45#include <linux/prefetch.h>
46#include <linux/vmalloc.h>
47#include <net/netlink.h>
48#include <net/pkt_sched.h>
49#include <net/sock.h>
50#include <net/tcp_states.h>
51#include <net/tcp.h>
52
53struct fq_skb_cb {
54 u64 time_to_send;
55};
56
57static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
58{
59 qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
60 return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
61}
62
63/*
64 * Per flow structure, dynamically allocated.
65 * If packets have monotically increasing time_to_send, they are placed in O(1)
66 * in linear list (head,tail), otherwise are placed in a rbtree (t_root).
67 */
68struct fq_flow {
69 struct rb_root t_root;
70 struct sk_buff *head; /* list of skbs for this flow : first skb */
71 union {
72 struct sk_buff *tail; /* last skb in the list */
73 unsigned long age; /* jiffies when flow was emptied, for gc */
74 };
75 struct rb_node fq_node; /* anchor in fq_root[] trees */
76 struct sock *sk;
77 int qlen; /* number of packets in flow queue */
78 int credit;
79 u32 socket_hash; /* sk_hash */
80 struct fq_flow *next; /* next pointer in RR lists, or &detached */
81
82 struct rb_node rate_node; /* anchor in q->delayed tree */
83 u64 time_next_packet;
84};
85
86struct fq_flow_head {
87 struct fq_flow *first;
88 struct fq_flow *last;
89};
90
91struct fq_sched_data {
92 struct fq_flow_head new_flows;
93
94 struct fq_flow_head old_flows;
95
96 struct rb_root delayed; /* for rate limited flows */
97 u64 time_next_delayed_flow;
98 unsigned long unthrottle_latency_ns;
99
100 struct fq_flow internal; /* for non classified or high prio packets */
101 u32 quantum;
102 u32 initial_quantum;
103 u32 flow_refill_delay;
104 u32 flow_plimit; /* max packets per flow */
105 unsigned long flow_max_rate; /* optional max rate per flow */
106 u64 ce_threshold;
107 u32 orphan_mask; /* mask for orphaned skb */
108 u32 low_rate_threshold;
109 struct rb_root *fq_root;
110 u8 rate_enable;
111 u8 fq_trees_log;
112
113 u32 flows;
114 u32 inactive_flows;
115 u32 throttled_flows;
116
117 u64 stat_gc_flows;
118 u64 stat_internal_packets;
119 u64 stat_throttled;
120 u64 stat_ce_mark;
121 u64 stat_flows_plimit;
122 u64 stat_pkts_too_long;
123 u64 stat_allocation_errors;
124 struct qdisc_watchdog watchdog;
125};
126
127/* special value to mark a detached flow (not on old/new list) */
128static struct fq_flow detached, throttled;
129
130static void fq_flow_set_detached(struct fq_flow *f)
131{
132 f->next = &detached;
133 f->age = jiffies;
134}
135
136static bool fq_flow_is_detached(const struct fq_flow *f)
137{
138 return f->next == &detached;
139}
140
141static bool fq_flow_is_throttled(const struct fq_flow *f)
142{
143 return f->next == &throttled;
144}
145
146static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
147{
148 if (head->first)
149 head->last->next = flow;
150 else
151 head->first = flow;
152 head->last = flow;
153 flow->next = NULL;
154}
155
156static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
157{
158 rb_erase(&f->rate_node, &q->delayed);
159 q->throttled_flows--;
160 fq_flow_add_tail(&q->old_flows, f);
161}
162
163static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
164{
165 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
166
167 while (*p) {
168 struct fq_flow *aux;
169
170 parent = *p;
171 aux = rb_entry(parent, struct fq_flow, rate_node);
172 if (f->time_next_packet >= aux->time_next_packet)
173 p = &parent->rb_right;
174 else
175 p = &parent->rb_left;
176 }
177 rb_link_node(&f->rate_node, parent, p);
178 rb_insert_color(&f->rate_node, &q->delayed);
179 q->throttled_flows++;
180 q->stat_throttled++;
181
182 f->next = &throttled;
183 if (q->time_next_delayed_flow > f->time_next_packet)
184 q->time_next_delayed_flow = f->time_next_packet;
185}
186
187
188static struct kmem_cache *fq_flow_cachep __read_mostly;
189
190
191/* limit number of collected flows per round */
192#define FQ_GC_MAX 8
193#define FQ_GC_AGE (3*HZ)
194
195static bool fq_gc_candidate(const struct fq_flow *f)
196{
197 return fq_flow_is_detached(f) &&
198 time_after(jiffies, f->age + FQ_GC_AGE);
199}
200
201static void fq_gc(struct fq_sched_data *q,
202 struct rb_root *root,
203 struct sock *sk)
204{
205 struct fq_flow *f, *tofree[FQ_GC_MAX];
206 struct rb_node **p, *parent;
207 int fcnt = 0;
208
209 p = &root->rb_node;
210 parent = NULL;
211 while (*p) {
212 parent = *p;
213
214 f = rb_entry(parent, struct fq_flow, fq_node);
215 if (f->sk == sk)
216 break;
217
218 if (fq_gc_candidate(f)) {
219 tofree[fcnt++] = f;
220 if (fcnt == FQ_GC_MAX)
221 break;
222 }
223
224 if (f->sk > sk)
225 p = &parent->rb_right;
226 else
227 p = &parent->rb_left;
228 }
229
230 q->flows -= fcnt;
231 q->inactive_flows -= fcnt;
232 q->stat_gc_flows += fcnt;
233 while (fcnt) {
234 struct fq_flow *f = tofree[--fcnt];
235
236 rb_erase(&f->fq_node, root);
237 kmem_cache_free(fq_flow_cachep, f);
238 }
239}
240
241static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
242{
243 struct rb_node **p, *parent;
244 struct sock *sk = skb->sk;
245 struct rb_root *root;
246 struct fq_flow *f;
247
248 /* warning: no starvation prevention... */
249 if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
250 return &q->internal;
251
252 /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
253 * or a listener (SYNCOOKIE mode)
254 * 1) request sockets are not full blown,
255 * they do not contain sk_pacing_rate
256 * 2) They are not part of a 'flow' yet
257 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
258 * especially if the listener set SO_MAX_PACING_RATE
259 * 4) We pretend they are orphaned
260 */
261 if (!sk || sk_listener(sk)) {
262 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
263
264 /* By forcing low order bit to 1, we make sure to not
265 * collide with a local flow (socket pointers are word aligned)
266 */
267 sk = (struct sock *)((hash << 1) | 1UL);
268 skb_orphan(skb);
269 } else if (sk->sk_state == TCP_CLOSE) {
270 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
271 /*
272 * Sockets in TCP_CLOSE are non connected.
273 * Typical use case is UDP sockets, they can send packets
274 * with sendto() to many different destinations.
275 * We probably could use a generic bit advertising
276 * non connected sockets, instead of sk_state == TCP_CLOSE,
277 * if we care enough.
278 */
279 sk = (struct sock *)((hash << 1) | 1UL);
280 }
281
282 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
283
284 if (q->flows >= (2U << q->fq_trees_log) &&
285 q->inactive_flows > q->flows/2)
286 fq_gc(q, root, sk);
287
288 p = &root->rb_node;
289 parent = NULL;
290 while (*p) {
291 parent = *p;
292
293 f = rb_entry(parent, struct fq_flow, fq_node);
294 if (f->sk == sk) {
295 /* socket might have been reallocated, so check
296 * if its sk_hash is the same.
297 * It not, we need to refill credit with
298 * initial quantum
299 */
300 if (unlikely(skb->sk == sk &&
301 f->socket_hash != sk->sk_hash)) {
302 f->credit = q->initial_quantum;
303 f->socket_hash = sk->sk_hash;
304 if (fq_flow_is_throttled(f))
305 fq_flow_unset_throttled(q, f);
306 f->time_next_packet = 0ULL;
307 }
308 return f;
309 }
310 if (f->sk > sk)
311 p = &parent->rb_right;
312 else
313 p = &parent->rb_left;
314 }
315
316 f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
317 if (unlikely(!f)) {
318 q->stat_allocation_errors++;
319 return &q->internal;
320 }
321 /* f->t_root is already zeroed after kmem_cache_zalloc() */
322
323 fq_flow_set_detached(f);
324 f->sk = sk;
325 if (skb->sk == sk)
326 f->socket_hash = sk->sk_hash;
327 f->credit = q->initial_quantum;
328
329 rb_link_node(&f->fq_node, parent, p);
330 rb_insert_color(&f->fq_node, root);
331
332 q->flows++;
333 q->inactive_flows++;
334 return f;
335}
336
337static struct sk_buff *fq_peek(struct fq_flow *flow)
338{
339 struct sk_buff *skb = skb_rb_first(&flow->t_root);
340 struct sk_buff *head = flow->head;
341
342 if (!skb)
343 return head;
344
345 if (!head)
346 return skb;
347
348 if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
349 return skb;
350 return head;
351}
352
353static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
354 struct sk_buff *skb)
355{
356 if (skb == flow->head) {
357 flow->head = skb->next;
358 } else {
359 rb_erase(&skb->rbnode, &flow->t_root);
360 skb->dev = qdisc_dev(sch);
361 }
362}
363
364/* remove one skb from head of flow queue */
365static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
366{
367 struct sk_buff *skb = fq_peek(flow);
368
369 if (skb) {
370 fq_erase_head(sch, flow, skb);
371 skb_mark_not_on_list(skb);
372 flow->qlen--;
373 qdisc_qstats_backlog_dec(sch, skb);
374 sch->q.qlen--;
375 }
376 return skb;
377}
378
379static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
380{
381 struct rb_node **p, *parent;
382 struct sk_buff *head, *aux;
383
384 fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();
385
386 head = flow->head;
387 if (!head ||
388 fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
389 if (!head)
390 flow->head = skb;
391 else
392 flow->tail->next = skb;
393 flow->tail = skb;
394 skb->next = NULL;
395 return;
396 }
397
398 p = &flow->t_root.rb_node;
399 parent = NULL;
400
401 while (*p) {
402 parent = *p;
403 aux = rb_to_skb(parent);
404 if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
405 p = &parent->rb_right;
406 else
407 p = &parent->rb_left;
408 }
409 rb_link_node(&skb->rbnode, parent, p);
410 rb_insert_color(&skb->rbnode, &flow->t_root);
411}
412
413static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
414 struct sk_buff **to_free)
415{
416 struct fq_sched_data *q = qdisc_priv(sch);
417 struct fq_flow *f;
418
419 if (unlikely(sch->q.qlen >= sch->limit))
420 return qdisc_drop(skb, sch, to_free);
421
422 f = fq_classify(skb, q);
423 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
424 q->stat_flows_plimit++;
425 return qdisc_drop(skb, sch, to_free);
426 }
427
428 f->qlen++;
429 qdisc_qstats_backlog_inc(sch, skb);
430 if (fq_flow_is_detached(f)) {
431 struct sock *sk = skb->sk;
432
433 fq_flow_add_tail(&q->new_flows, f);
434 if (time_after(jiffies, f->age + q->flow_refill_delay))
435 f->credit = max_t(u32, f->credit, q->quantum);
436 if (sk && q->rate_enable) {
437 if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
438 SK_PACING_FQ))
439 smp_store_release(&sk->sk_pacing_status,
440 SK_PACING_FQ);
441 }
442 q->inactive_flows--;
443 }
444
445 /* Note: this overwrites f->age */
446 flow_queue_add(f, skb);
447
448 if (unlikely(f == &q->internal)) {
449 q->stat_internal_packets++;
450 }
451 sch->q.qlen++;
452
453 return NET_XMIT_SUCCESS;
454}
455
456static void fq_check_throttled(struct fq_sched_data *q, u64 now)
457{
458 unsigned long sample;
459 struct rb_node *p;
460
461 if (q->time_next_delayed_flow > now)
462 return;
463
464 /* Update unthrottle latency EWMA.
465 * This is cheap and can help diagnosing timer/latency problems.
466 */
467 sample = (unsigned long)(now - q->time_next_delayed_flow);
468 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
469 q->unthrottle_latency_ns += sample >> 3;
470
471 q->time_next_delayed_flow = ~0ULL;
472 while ((p = rb_first(&q->delayed)) != NULL) {
473 struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
474
475 if (f->time_next_packet > now) {
476 q->time_next_delayed_flow = f->time_next_packet;
477 break;
478 }
479 fq_flow_unset_throttled(q, f);
480 }
481}
482
483static struct sk_buff *fq_dequeue(struct Qdisc *sch)
484{
485 struct fq_sched_data *q = qdisc_priv(sch);
486 struct fq_flow_head *head;
487 struct sk_buff *skb;
488 struct fq_flow *f;
489 unsigned long rate;
490 u32 plen;
491 u64 now;
492
493 if (!sch->q.qlen)
494 return NULL;
495
496 skb = fq_dequeue_head(sch, &q->internal);
497 if (skb)
498 goto out;
499
500 now = ktime_get_ns();
501 fq_check_throttled(q, now);
502begin:
503 head = &q->new_flows;
504 if (!head->first) {
505 head = &q->old_flows;
506 if (!head->first) {
507 if (q->time_next_delayed_flow != ~0ULL)
508 qdisc_watchdog_schedule_ns(&q->watchdog,
509 q->time_next_delayed_flow);
510 return NULL;
511 }
512 }
513 f = head->first;
514
515 if (f->credit <= 0) {
516 f->credit += q->quantum;
517 head->first = f->next;
518 fq_flow_add_tail(&q->old_flows, f);
519 goto begin;
520 }
521
522 skb = fq_peek(f);
523 if (skb) {
524 u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
525 f->time_next_packet);
526
527 if (now < time_next_packet) {
528 head->first = f->next;
529 f->time_next_packet = time_next_packet;
530 fq_flow_set_throttled(q, f);
531 goto begin;
532 }
533 if (time_next_packet &&
534 (s64)(now - time_next_packet - q->ce_threshold) > 0) {
535 INET_ECN_set_ce(skb);
536 q->stat_ce_mark++;
537 }
538 }
539
540 skb = fq_dequeue_head(sch, f);
541 if (!skb) {
542 head->first = f->next;
543 /* force a pass through old_flows to prevent starvation */
544 if ((head == &q->new_flows) && q->old_flows.first) {
545 fq_flow_add_tail(&q->old_flows, f);
546 } else {
547 fq_flow_set_detached(f);
548 q->inactive_flows++;
549 }
550 goto begin;
551 }
552 prefetch(&skb->end);
553 plen = qdisc_pkt_len(skb);
554 f->credit -= plen;
555
556 if (!q->rate_enable)
557 goto out;
558
559 rate = q->flow_max_rate;
560
561 /* If EDT time was provided for this skb, we need to
562 * update f->time_next_packet only if this qdisc enforces
563 * a flow max rate.
564 */
565 if (!skb->tstamp) {
566 if (skb->sk)
567 rate = min(skb->sk->sk_pacing_rate, rate);
568
569 if (rate <= q->low_rate_threshold) {
570 f->credit = 0;
571 } else {
572 plen = max(plen, q->quantum);
573 if (f->credit > 0)
574 goto out;
575 }
576 }
577 if (rate != ~0UL) {
578 u64 len = (u64)plen * NSEC_PER_SEC;
579
580 if (likely(rate))
581 len = div64_ul(len, rate);
582 /* Since socket rate can change later,
583 * clamp the delay to 1 second.
584 * Really, providers of too big packets should be fixed !
585 */
586 if (unlikely(len > NSEC_PER_SEC)) {
587 len = NSEC_PER_SEC;
588 q->stat_pkts_too_long++;
589 }
590 /* Account for schedule/timers drifts.
591 * f->time_next_packet was set when prior packet was sent,
592 * and current time (@now) can be too late by tens of us.
593 */
594 if (f->time_next_packet)
595 len -= min(len/2, now - f->time_next_packet);
596 f->time_next_packet = now + len;
597 }
598out:
599 qdisc_bstats_update(sch, skb);
600 return skb;
601}
602
603static void fq_flow_purge(struct fq_flow *flow)
604{
605 struct rb_node *p = rb_first(&flow->t_root);
606
607 while (p) {
608 struct sk_buff *skb = rb_to_skb(p);
609
610 p = rb_next(p);
611 rb_erase(&skb->rbnode, &flow->t_root);
612 rtnl_kfree_skbs(skb, skb);
613 }
614 rtnl_kfree_skbs(flow->head, flow->tail);
615 flow->head = NULL;
616 flow->qlen = 0;
617}
618
619static void fq_reset(struct Qdisc *sch)
620{
621 struct fq_sched_data *q = qdisc_priv(sch);
622 struct rb_root *root;
623 struct rb_node *p;
624 struct fq_flow *f;
625 unsigned int idx;
626
627 sch->q.qlen = 0;
628 sch->qstats.backlog = 0;
629
630 fq_flow_purge(&q->internal);
631
632 if (!q->fq_root)
633 return;
634
635 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
636 root = &q->fq_root[idx];
637 while ((p = rb_first(root)) != NULL) {
638 f = rb_entry(p, struct fq_flow, fq_node);
639 rb_erase(p, root);
640
641 fq_flow_purge(f);
642
643 kmem_cache_free(fq_flow_cachep, f);
644 }
645 }
646 q->new_flows.first = NULL;
647 q->old_flows.first = NULL;
648 q->delayed = RB_ROOT;
649 q->flows = 0;
650 q->inactive_flows = 0;
651 q->throttled_flows = 0;
652}
653
654static void fq_rehash(struct fq_sched_data *q,
655 struct rb_root *old_array, u32 old_log,
656 struct rb_root *new_array, u32 new_log)
657{
658 struct rb_node *op, **np, *parent;
659 struct rb_root *oroot, *nroot;
660 struct fq_flow *of, *nf;
661 int fcnt = 0;
662 u32 idx;
663
664 for (idx = 0; idx < (1U << old_log); idx++) {
665 oroot = &old_array[idx];
666 while ((op = rb_first(oroot)) != NULL) {
667 rb_erase(op, oroot);
668 of = rb_entry(op, struct fq_flow, fq_node);
669 if (fq_gc_candidate(of)) {
670 fcnt++;
671 kmem_cache_free(fq_flow_cachep, of);
672 continue;
673 }
674 nroot = &new_array[hash_ptr(of->sk, new_log)];
675
676 np = &nroot->rb_node;
677 parent = NULL;
678 while (*np) {
679 parent = *np;
680
681 nf = rb_entry(parent, struct fq_flow, fq_node);
682 BUG_ON(nf->sk == of->sk);
683
684 if (nf->sk > of->sk)
685 np = &parent->rb_right;
686 else
687 np = &parent->rb_left;
688 }
689
690 rb_link_node(&of->fq_node, parent, np);
691 rb_insert_color(&of->fq_node, nroot);
692 }
693 }
694 q->flows -= fcnt;
695 q->inactive_flows -= fcnt;
696 q->stat_gc_flows += fcnt;
697}
698
699static void fq_free(void *addr)
700{
701 kvfree(addr);
702}
703
static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was set up, we can allocate memory on the right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

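/* Netlink policy : all FQ attributes are 32bit values */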
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT] = { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
	[TCA_FQ_QUANTUM] = { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
};

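/* Handle 'tc qdisc change ... fq ...' : parse netlink attributes, update
 * parameters under the qdisc tree lock, resize the hash table if needed,
 * and drop packets in excess of the (possibly lowered) limit.
 */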
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned int drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (tb[TCA_FQ_CE_THRESHOLD])
		q->ce_threshold = (u64)NSEC_PER_USEC *
				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

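/* Qdisc teardown : purge all flows, free the hash table and cancel
 * the pending watchdog timer.
 */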
static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

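/* Qdisc setup : install default parameters, arm the watchdog on the
 * monotonic clock, then apply user supplied attributes (if any) and
 * allocate the initial hash table.
 */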
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit = 10000;
	q->flow_plimit = 100;
	q->quantum = 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay = msecs_to_jiffies(40);
	q->flow_max_rate = ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable = 1;
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->fq_root = NULL;
	q->fq_trees_log = ilog2(1024);
	q->orphan_mask = 1024 - 1;
	q->low_rate_threshold = 550000 / 8;

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;

	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

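/* Dump current configuration so that 'tc qdisc show' can display it */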
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 ce_threshold = q->ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	do_div(ce_threshold, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

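/* Export statistics (struct tc_fq_qd_stats) under the qdisc tree lock */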
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows = q->stat_gc_flows;
	st.highprio_packets = q->stat_internal_packets;
	st.tcp_retrans = 0;
	st.throttled = q->stat_throttled;
	st.flows_plimit = q->stat_flows_plimit;
	st.pkts_too_long = q->stat_pkts_too_long;
	st.allocation_errors = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.flows = q->flows;
	st.inactive_flows = q->inactive_flows;
	st.throttled_flows = q->throttled_flows;
	st.unthrottle_latency_ns = min_t(unsigned long,
					 q->unthrottle_latency_ns, ~0U);
	st.ce_mark = q->stat_ce_mark;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

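/* Qdisc operations registered under the "fq" identifier */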
static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id = "fq",
	.priv_size = sizeof(struct fq_sched_data),

	.enqueue = fq_enqueue,
	.dequeue = fq_dequeue,
	.peek = qdisc_peek_dequeued,
	.init = fq_init,
	.reset = fq_reset,
	.destroy = fq_destroy,
	.change = fq_change,
	.dump = fq_dump,
	.dump_stats = fq_dump_stats,
	.owner = THIS_MODULE,
};

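/* Module init/exit : create the per-flow kmem cache and (un)register
 * the qdisc with the packet scheduler core.
 */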
static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");