// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * Meant to be mostly used for locally generated traffic :
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability :
 *
 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect rate limitation.
 *
 * enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add skb to the per flow list of skb (fifo).
 *   - Use a special fifo for high prio packets
 *
 * dequeue() : serves flows in Round Robin
 * Note : When a flow becomes empty, we do not immediately remove it from
 * rb trees, for performance reasons (it is expected to send additional packets,
 * or SLAB cache will reuse socket for another flow)
 */
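
/* Example setup (a sketch; exact option names depend on the installed
 * iproute2 version, see tc-fq(8)) :
 *
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *		quantum 3028 initial_quantum 15140 maxrate 1gbit
 *
 * A socket may additionally cap its own pacing rate via
 * setsockopt(SO_MAX_PACING_RATE).
 */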

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
	u64 time_to_send;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 * in a linear list (head,tail), otherwise they are placed in a rbtree (t_root).
 */
struct fq_flow {
/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
	struct rb_root	t_root;
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* (jiffies | 1UL) when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	u32		socket_hash;	/* sk_hash */
	int		qlen;		/* number of packets in flow queue */

/* Second cache line, used in fq_dequeue() */
	int		credit;
	/* 32bit hole on 64bit arches */

	struct fq_flow *next;		/* next pointer in RR lists */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
} ____cacheline_aligned_in_smp;

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	u64		ktime_cache;	/* copy of last ktime_get_ns() */
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_plimit;	/* max packets per flow */
	unsigned long	flow_max_rate;	/* optional max rate per flow */
	u64		ce_threshold;
	u64		horizon;	/* horizon in ns */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;
	u8		horizon_drop;
	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_throttled;
	u64		stat_ce_mark;
	u64		stat_horizon_drops;
	u64		stat_horizon_caps;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;

	u32		timer_slack;	/* hrtimer slack in ns */
	struct qdisc_watchdog watchdog;
};

/*
 * f->tail and f->age share the same location.
 * We can use the low order bit to differentiate if this location points
 * to a sk_buff or contains a jiffies value, if we force this value to be odd.
 * This assumes f->tail's low order bit is 0, since alignof(struct sk_buff) >= 2.
 */
static void fq_flow_set_detached(struct fq_flow *f)
{
	f->age = jiffies | 1UL;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return !!(f->age & 1UL);
}

/* special value to mark a throttled flow (not on old/new list) */
static struct fq_flow throttled;

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
}

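/* Park a flow in q->delayed, an rbtree ordered by time_next_packet,
 * so that the flow to unthrottle first is always rb_first().
 */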
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

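/* Opportunistic garbage collection : while walking one hash bucket during
 * a lookup, collect up to FQ_GC_MAX flows that have been detached for more
 * than FQ_GC_AGE, then free them with one bulk call.
 */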
static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct rb_node **p, *parent;
	void *tofree[FQ_GC_MAX];
	struct fq_flow *f;
	int i, fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	if (!fcnt)
		return;

	for (i = fcnt; i > 0; ) {
		f = tofree[--i];
		rb_erase(&f->fq_node, root);
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;

	kmem_cache_free_bulk(fq_flow_cachep, fcnt, tofree);
}

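/* Map one skb to its flow : TC_PRIO_CONTROL packets use the internal
 * (high priority) queue; other packets are keyed by socket pointer, or by
 * a perturbed rxhash for orphaned/listener/unconnected traffic, in one of
 * 2^fq_trees_log RB tree buckets.
 */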
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	} else if (sk->sk_state == TCP_CLOSE) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
		/*
		 * Sockets in TCP_CLOSE are non connected.
		 * Typical use case is UDP sockets, they can send packets
		 * with sendto() to many different destinations.
		 * We probably could use a generic bit advertising
		 * non connected sockets, instead of sk_state == TCP_CLOSE,
		 * if we care enough.
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk == sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (q->rate_enable)
					smp_store_release(&sk->sk_pacing_status,
							  SK_PACING_FQ);
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	/* f->t_root is already zeroed after kmem_cache_zalloc() */

	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk == sk) {
		f->socket_hash = sk->sk_hash;
		if (q->rate_enable)
			smp_store_release(&sk->sk_pacing_status,
					  SK_PACING_FQ);
	}
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}

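/* Return the packet with the smallest time_to_send, looking at both the
 * head of the in-order linear list and the leftmost node of the
 * out-of-order rbtree.
 */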
static struct sk_buff *fq_peek(struct fq_flow *flow)
{
	struct sk_buff *skb = skb_rb_first(&flow->t_root);
	struct sk_buff *head = flow->head;

	if (!skb)
		return head;

	if (!head)
		return skb;

	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
		return skb;
	return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
			  struct sk_buff *skb)
{
	if (skb == flow->head) {
		flow->head = skb->next;
	} else {
		rb_erase(&skb->rbnode, &flow->t_root);
		skb->dev = qdisc_dev(sch);
	}
}

/* Remove one skb from flow queue.
 * This skb must be the return value of prior fq_peek().
 */
static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
			   struct sk_buff *skb)
{
	fq_erase_head(sch, flow, skb);
	skb_mark_not_on_list(skb);
	flow->qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
}

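/* Add one skb to the flow queue. time_to_send values are usually
 * monotonically increasing, so the common case is an O(1) append to the
 * linear list; out of order packets go to the rbtree instead.
 */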
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct rb_node **p, *parent;
	struct sk_buff *head, *aux;

	head = flow->head;
	if (!head ||
	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
		if (!head)
			flow->head = skb;
		else
			flow->tail->next = skb;
		flow->tail = skb;
		skb->next = NULL;
		return;
	}

	p = &flow->t_root.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		aux = rb_to_skb(parent);
		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &flow->t_root);
}

static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
				     const struct fq_sched_data *q)
{
	return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	if (!skb->tstamp) {
		fq_skb_cb(skb)->time_to_send = q->ktime_cache = ktime_get_ns();
	} else {
		/* Check if packet timestamp is too far in the future.
		 * Try our cached value first, to avoid ktime_get_ns()
		 * cost in most cases.
		 */
		if (fq_packet_beyond_horizon(skb, q)) {
			/* Refresh our cache and check another time */
			q->ktime_cache = ktime_get_ns();
			if (fq_packet_beyond_horizon(skb, q)) {
				if (q->horizon_drop) {
					q->stat_horizon_drops++;
					return qdisc_drop(skb, sch, to_free);
				}
				q->stat_horizon_caps++;
				skb->tstamp = q->ktime_cache + q->horizon;
			}
		}
		fq_skb_cb(skb)->time_to_send = skb->tstamp;
	}

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

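/* Move flows whose time_next_packet has passed from q->delayed back to the
 * old_flows list. Also maintain an EWMA of the unthrottle latency :
 * new = 7/8 * old + 1/8 * sample (the two >> 3 shifts below).
 */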
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}

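/* Deficit Round Robin dequeue : new flows are serviced first; a flow that
 * exhausts its credit gets a fresh quantum and moves to the old_flows
 * list; a flow whose next packet is not due yet is parked in the
 * q->delayed tree and the watchdog timer is armed accordingly.
 */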
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	unsigned long rate;
	u32 plen;
	u64 now;

	if (!sch->q.qlen)
		return NULL;

	skb = fq_peek(&q->internal);
	if (unlikely(skb)) {
		fq_dequeue_skb(sch, &q->internal, skb);
		goto out;
	}

	q->ktime_cache = now = ktime_get_ns();
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_range_ns(&q->watchdog,
							q->time_next_delayed_flow,
							q->timer_slack);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = fq_peek(f);
	if (skb) {
		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
					     f->time_next_packet);

		if (now < time_next_packet) {
			head->first = f->next;
			f->time_next_packet = time_next_packet;
			fq_flow_set_throttled(q, f);
			goto begin;
		}
		prefetch(&skb->end);
		if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
			INET_ECN_set_ce(skb);
			q->stat_ce_mark++;
		}
		fq_dequeue_skb(sch, f, skb);
	} else {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	plen = qdisc_pkt_len(skb);
	f->credit -= plen;

	if (!q->rate_enable)
		goto out;

	rate = q->flow_max_rate;

	/* If EDT time was provided for this skb, we need to
	 * update f->time_next_packet only if this qdisc enforces
	 * a flow max rate.
	 */
	if (!skb->tstamp) {
		if (skb->sk)
			rate = min(skb->sk->sk_pacing_rate, rate);

		if (rate <= q->low_rate_threshold) {
			f->credit = 0;
		} else {
			plen = max(plen, q->quantum);
			if (f->credit > 0)
				goto out;
		}
	}
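	/* The inter-packet delay is plen / rate seconds.
	 * Worked example : plen = 1500 bytes at rate = 1250000 bytes/sec
	 * (10 Mbit) gives len = 1500 * NSEC_PER_SEC / 1250000 = 1.2 ms.
	 */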
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			len = div64_ul(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
	struct rb_node *p = rb_first(&flow->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &flow->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

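/* Re-insert all flows into a new bucket array after fq_trees_log changed
 * (see fq_resize()), freeing gc candidates instead of re-hashing them.
 */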
static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },

	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
	[TCA_FQ_TIMER_SLACK]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON_DROP]		= { .type = NLA_U8 },
};

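/* Apply netlink attributes under the qdisc tree lock. The lock is
 * temporarily released around fq_resize(), then packets in excess of the
 * (possibly lowered) limit are dequeued and dropped.
 */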
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned int drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0 && quantum <= (1 << 20)) {
			q->quantum = quantum;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			err = -EINVAL;
		}
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (tb[TCA_FQ_CE_THRESHOLD])
		q->ce_threshold = (u64)NSEC_PER_USEC *
				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

	if (tb[TCA_FQ_TIMER_SLACK])
		q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]);

	if (tb[TCA_FQ_HORIZON])
		q->horizon = (u64)NSEC_PER_USEC *
			     nla_get_u32(tb[TCA_FQ_HORIZON]);

	if (tb[TCA_FQ_HORIZON_DROP])
		q->horizon_drop = nla_get_u8(tb[TCA_FQ_HORIZON_DROP]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

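/* Default parameters : initial_quantum = 10 MTUs lets a new flow send
 * roughly ten full sized packets before its credit runs out, after which
 * quantum = 2 MTUs is the per-round DRR allowance.
 */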
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit = 10000;
	q->flow_plimit = 100;
	q->quantum = 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay = msecs_to_jiffies(40);
	q->flow_max_rate = ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable = 1;
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->fq_root = NULL;
	q->fq_trees_log = ilog2(1024);
	q->orphan_mask = 1024 - 1;
	q->low_rate_threshold = 550000 / 8;

	q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */

	q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
	q->horizon_drop = 1; /* by default, drop packets beyond horizon */

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;

	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 ce_threshold = q->ce_threshold;
	u64 horizon = q->horizon;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	do_div(ce_threshold, NSEC_PER_USEC);
	do_div(horizon, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) ||
	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack) ||
	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows = q->stat_gc_flows;
	st.highprio_packets = q->stat_internal_packets;
	st.tcp_retrans = 0;
	st.throttled = q->stat_throttled;
	st.flows_plimit = q->stat_flows_plimit;
	st.pkts_too_long = q->stat_pkts_too_long;
	st.allocation_errors = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
				    ktime_get_ns();
	st.flows = q->flows;
	st.inactive_flows = q->inactive_flows;
	st.throttled_flows = q->throttled_flows;
	st.unthrottle_latency_ns = min_t(unsigned long,
					 q->unthrottle_latency_ns, ~0U);
	st.ce_mark = q->stat_ce_mark;
	st.horizon_drops = q->stat_horizon_drops;
	st.horizon_caps = q->stat_horizon_caps;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		= "fq",
	.priv_size	= sizeof(struct fq_sched_data),

	.enqueue	= fq_enqueue,
	.dequeue	= fq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_init,
	.reset		= fq_reset,
	.destroy	= fq_destroy,
	.change		= fq_change,
	.dump		= fq_dump,
	.dump_stats	= fq_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
4 *
5 * Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
6 *
7 * Meant to be mostly used for locally generated traffic :
8 * Fast classification depends on skb->sk being set before reaching us.
9 * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
10 * All packets belonging to a socket are considered as a 'flow'.
11 *
12 * Flows are dynamically allocated and stored in a hash table of RB trees
13 * They are also part of one Round Robin 'queues' (new or old flows)
14 *
15 * Burst avoidance (aka pacing) capability :
16 *
17 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
18 * bunch of packets, and this packet scheduler adds delay between
19 * packets to respect rate limitation.
20 *
21 * enqueue() :
22 * - lookup one RB tree (out of 1024 or more) to find the flow.
23 * If non existent flow, create it, add it to the tree.
24 * Add skb to the per flow list of skb (fifo).
25 * - Use a special fifo for high prio packets
26 *
27 * dequeue() : serves flows in Round Robin
28 * Note : When a flow becomes empty, we do not immediately remove it from
29 * rb trees, for performance reasons (its expected to send additional packets,
30 * or SLAB cache will reuse socket for another flow)
31 */
32
33#include <linux/module.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/jiffies.h>
37#include <linux/string.h>
38#include <linux/in.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/skbuff.h>
42#include <linux/slab.h>
43#include <linux/rbtree.h>
44#include <linux/hash.h>
45#include <linux/prefetch.h>
46#include <linux/vmalloc.h>
47#include <net/netlink.h>
48#include <net/pkt_sched.h>
49#include <net/sock.h>
50#include <net/tcp_states.h>
51#include <net/tcp.h>
52
53struct fq_skb_cb {
54 u64 time_to_send;
55 u8 band;
56};
57
58static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
59{
60 qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
61 return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
62}
63
64/*
65 * Per flow structure, dynamically allocated.
66 * If packets have monotically increasing time_to_send, they are placed in O(1)
67 * in linear list (head,tail), otherwise are placed in a rbtree (t_root).
68 */
69struct fq_flow {
70/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
71 struct rb_root t_root;
72 struct sk_buff *head; /* list of skbs for this flow : first skb */
73 union {
74 struct sk_buff *tail; /* last skb in the list */
75 unsigned long age; /* (jiffies | 1UL) when flow was emptied, for gc */
76 };
77 union {
78 struct rb_node fq_node; /* anchor in fq_root[] trees */
79 /* Following field is only used for q->internal,
80 * because q->internal is not hashed in fq_root[]
81 */
82 u64 stat_fastpath_packets;
83 };
84 struct sock *sk;
85 u32 socket_hash; /* sk_hash */
86 int qlen; /* number of packets in flow queue */
87
88/* Second cache line */
89 int credit;
90 int band;
91 struct fq_flow *next; /* next pointer in RR lists */
92
93 struct rb_node rate_node; /* anchor in q->delayed tree */
94 u64 time_next_packet;
95};
96
97struct fq_flow_head {
98 struct fq_flow *first;
99 struct fq_flow *last;
100};
101
102struct fq_perband_flows {
103 struct fq_flow_head new_flows;
104 struct fq_flow_head old_flows;
105 int credit;
106 int quantum; /* based on band nr : 576KB, 192KB, 64KB */
107};
108
109struct fq_sched_data {
110/* Read mostly cache line */
111
112 u32 quantum;
113 u32 initial_quantum;
114 u32 flow_refill_delay;
115 u32 flow_plimit; /* max packets per flow */
116 unsigned long flow_max_rate; /* optional max rate per flow */
117 u64 ce_threshold;
118 u64 horizon; /* horizon in ns */
119 u32 orphan_mask; /* mask for orphaned skb */
120 u32 low_rate_threshold;
121 struct rb_root *fq_root;
122 u8 rate_enable;
123 u8 fq_trees_log;
124 u8 horizon_drop;
125 u8 prio2band[(TC_PRIO_MAX + 1) >> 2];
126 u32 timer_slack; /* hrtimer slack in ns */
127
128/* Read/Write fields. */
129
130 unsigned int band_nr; /* band being serviced in fq_dequeue() */
131
132 struct fq_perband_flows band_flows[FQ_BANDS];
133
134 struct fq_flow internal; /* fastpath queue. */
135 struct rb_root delayed; /* for rate limited flows */
136 u64 time_next_delayed_flow;
137 unsigned long unthrottle_latency_ns;
138
139 u32 band_pkt_count[FQ_BANDS];
140 u32 flows;
141 u32 inactive_flows; /* Flows with no packet to send. */
142 u32 throttled_flows;
143
144 u64 stat_throttled;
145 struct qdisc_watchdog watchdog;
146 u64 stat_gc_flows;
147
148/* Seldom used fields. */
149
150 u64 stat_band_drops[FQ_BANDS];
151 u64 stat_ce_mark;
152 u64 stat_horizon_drops;
153 u64 stat_horizon_caps;
154 u64 stat_flows_plimit;
155 u64 stat_pkts_too_long;
156 u64 stat_allocation_errors;
157};
158
159/* return the i-th 2-bit value ("crumb") */
160static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
161{
162 return (prio2band[prio / 4] >> (2 * (prio & 0x3))) & 0x3;
163}
164
165/*
166 * f->tail and f->age share the same location.
167 * We can use the low order bit to differentiate if this location points
168 * to a sk_buff or contains a jiffies value, if we force this value to be odd.
169 * This assumes f->tail low order bit must be 0 since alignof(struct sk_buff) >= 2
170 */
171static void fq_flow_set_detached(struct fq_flow *f)
172{
173 f->age = jiffies | 1UL;
174}
175
176static bool fq_flow_is_detached(const struct fq_flow *f)
177{
178 return !!(f->age & 1UL);
179}
180
181/* special value to mark a throttled flow (not on old/new list) */
182static struct fq_flow throttled;
183
184static bool fq_flow_is_throttled(const struct fq_flow *f)
185{
186 return f->next == &throttled;
187}
188
189enum new_flow {
190 NEW_FLOW,
191 OLD_FLOW
192};
193
194static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
195 enum new_flow list_sel)
196{
197 struct fq_perband_flows *pband = &q->band_flows[flow->band];
198 struct fq_flow_head *head = (list_sel == NEW_FLOW) ?
199 &pband->new_flows :
200 &pband->old_flows;
201
202 if (head->first)
203 head->last->next = flow;
204 else
205 head->first = flow;
206 head->last = flow;
207 flow->next = NULL;
208}
209
210static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
211{
212 rb_erase(&f->rate_node, &q->delayed);
213 q->throttled_flows--;
214 fq_flow_add_tail(q, f, OLD_FLOW);
215}
216
217static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
218{
219 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
220
221 while (*p) {
222 struct fq_flow *aux;
223
224 parent = *p;
225 aux = rb_entry(parent, struct fq_flow, rate_node);
226 if (f->time_next_packet >= aux->time_next_packet)
227 p = &parent->rb_right;
228 else
229 p = &parent->rb_left;
230 }
231 rb_link_node(&f->rate_node, parent, p);
232 rb_insert_color(&f->rate_node, &q->delayed);
233 q->throttled_flows++;
234 q->stat_throttled++;
235
236 f->next = &throttled;
237 if (q->time_next_delayed_flow > f->time_next_packet)
238 q->time_next_delayed_flow = f->time_next_packet;
239}
240
241
242static struct kmem_cache *fq_flow_cachep __read_mostly;
243
244
245/* limit number of collected flows per round */
246#define FQ_GC_MAX 8
247#define FQ_GC_AGE (3*HZ)
248
249static bool fq_gc_candidate(const struct fq_flow *f)
250{
251 return fq_flow_is_detached(f) &&
252 time_after(jiffies, f->age + FQ_GC_AGE);
253}
254
255static void fq_gc(struct fq_sched_data *q,
256 struct rb_root *root,
257 struct sock *sk)
258{
259 struct rb_node **p, *parent;
260 void *tofree[FQ_GC_MAX];
261 struct fq_flow *f;
262 int i, fcnt = 0;
263
264 p = &root->rb_node;
265 parent = NULL;
266 while (*p) {
267 parent = *p;
268
269 f = rb_entry(parent, struct fq_flow, fq_node);
270 if (f->sk == sk)
271 break;
272
273 if (fq_gc_candidate(f)) {
274 tofree[fcnt++] = f;
275 if (fcnt == FQ_GC_MAX)
276 break;
277 }
278
279 if (f->sk > sk)
280 p = &parent->rb_right;
281 else
282 p = &parent->rb_left;
283 }
284
285 if (!fcnt)
286 return;
287
288 for (i = fcnt; i > 0; ) {
289 f = tofree[--i];
290 rb_erase(&f->fq_node, root);
291 }
292 q->flows -= fcnt;
293 q->inactive_flows -= fcnt;
294 q->stat_gc_flows += fcnt;
295
296 kmem_cache_free_bulk(fq_flow_cachep, fcnt, tofree);
297}
298
299/* Fast path can be used if :
300 * 1) Packet tstamp is in the past.
301 * 2) FQ qlen == 0 OR
302 * (no flow is currently eligible for transmit,
303 * AND fast path queue has less than 8 packets)
304 * 3) No SO_MAX_PACING_RATE on the socket (if any).
305 * 4) No @maxrate attribute on this qdisc,
306 *
307 * FQ can not use generic TCQ_F_CAN_BYPASS infrastructure.
308 */
309static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
310 u64 now)
311{
312 const struct fq_sched_data *q = qdisc_priv(sch);
313 const struct sock *sk;
314
315 if (fq_skb_cb(skb)->time_to_send > now)
316 return false;
317
318 if (sch->q.qlen != 0) {
319 /* Even if some packets are stored in this qdisc,
320 * we can still enable fast path if all of them are
321 * scheduled in the future (ie no flows are eligible)
322 * or in the fast path queue.
323 */
324 if (q->flows != q->inactive_flows + q->throttled_flows)
325 return false;
326
327 /* Do not allow fast path queue to explode, we want Fair Queue mode
328 * under pressure.
329 */
330 if (q->internal.qlen >= 8)
331 return false;
332 }
333
334 sk = skb->sk;
335 if (sk && sk_fullsock(sk) && !sk_is_tcp(sk) &&
336 sk->sk_max_pacing_rate != ~0UL)
337 return false;
338
339 if (q->flow_max_rate != ~0UL)
340 return false;
341
342 return true;
343}
344
345static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
346 u64 now)
347{
348 struct fq_sched_data *q = qdisc_priv(sch);
349 struct rb_node **p, *parent;
350 struct sock *sk = skb->sk;
351 struct rb_root *root;
352 struct fq_flow *f;
353
354 /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
355 * or a listener (SYNCOOKIE mode)
356 * 1) request sockets are not full blown,
357 * they do not contain sk_pacing_rate
358 * 2) They are not part of a 'flow' yet
359 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
360 * especially if the listener set SO_MAX_PACING_RATE
361 * 4) We pretend they are orphaned
362 */
363 if (!sk || sk_listener(sk)) {
364 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
365
366 /* By forcing low order bit to 1, we make sure to not
367 * collide with a local flow (socket pointers are word aligned)
368 */
369 sk = (struct sock *)((hash << 1) | 1UL);
370 skb_orphan(skb);
371 } else if (sk->sk_state == TCP_CLOSE) {
372 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
373 /*
374 * Sockets in TCP_CLOSE are non connected.
375 * Typical use case is UDP sockets, they can send packets
376 * with sendto() to many different destinations.
377 * We probably could use a generic bit advertising
378 * non connected sockets, instead of sk_state == TCP_CLOSE,
379 * if we care enough.
380 */
381 sk = (struct sock *)((hash << 1) | 1UL);
382 }
383
384 if (fq_fastpath_check(sch, skb, now)) {
385 q->internal.stat_fastpath_packets++;
386 if (skb->sk == sk && q->rate_enable &&
387 READ_ONCE(sk->sk_pacing_status) != SK_PACING_FQ)
388 smp_store_release(&sk->sk_pacing_status,
389 SK_PACING_FQ);
390 return &q->internal;
391 }
392
393 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
394
395 fq_gc(q, root, sk);
396
397 p = &root->rb_node;
398 parent = NULL;
399 while (*p) {
400 parent = *p;
401
402 f = rb_entry(parent, struct fq_flow, fq_node);
403 if (f->sk == sk) {
404 /* socket might have been reallocated, so check
405 * if its sk_hash is the same.
406 * It not, we need to refill credit with
407 * initial quantum
408 */
409 if (unlikely(skb->sk == sk &&
410 f->socket_hash != sk->sk_hash)) {
411 f->credit = q->initial_quantum;
412 f->socket_hash = sk->sk_hash;
413 if (q->rate_enable)
414 smp_store_release(&sk->sk_pacing_status,
415 SK_PACING_FQ);
416 if (fq_flow_is_throttled(f))
417 fq_flow_unset_throttled(q, f);
418 f->time_next_packet = 0ULL;
419 }
420 return f;
421 }
422 if (f->sk > sk)
423 p = &parent->rb_right;
424 else
425 p = &parent->rb_left;
426 }
427
428 f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
429 if (unlikely(!f)) {
430 q->stat_allocation_errors++;
431 return &q->internal;
432 }
433 /* f->t_root is already zeroed after kmem_cache_zalloc() */
434
435 fq_flow_set_detached(f);
436 f->sk = sk;
437 if (skb->sk == sk) {
438 f->socket_hash = sk->sk_hash;
439 if (q->rate_enable)
440 smp_store_release(&sk->sk_pacing_status,
441 SK_PACING_FQ);
442 }
443 f->credit = q->initial_quantum;
444
445 rb_link_node(&f->fq_node, parent, p);
446 rb_insert_color(&f->fq_node, root);
447
448 q->flows++;
449 q->inactive_flows++;
450 return f;
451}
452
453static struct sk_buff *fq_peek(struct fq_flow *flow)
454{
455 struct sk_buff *skb = skb_rb_first(&flow->t_root);
456 struct sk_buff *head = flow->head;
457
458 if (!skb)
459 return head;
460
461 if (!head)
462 return skb;
463
464 if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
465 return skb;
466 return head;
467}
468
469static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
470 struct sk_buff *skb)
471{
472 if (skb == flow->head) {
473 flow->head = skb->next;
474 } else {
475 rb_erase(&skb->rbnode, &flow->t_root);
476 skb->dev = qdisc_dev(sch);
477 }
478}
479
480/* Remove one skb from flow queue.
481 * This skb must be the return value of prior fq_peek().
482 */
483static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
484 struct sk_buff *skb)
485{
486 fq_erase_head(sch, flow, skb);
487 skb_mark_not_on_list(skb);
488 qdisc_qstats_backlog_dec(sch, skb);
489 sch->q.qlen--;
490}
491
492static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
493{
494 struct rb_node **p, *parent;
495 struct sk_buff *head, *aux;
496
497 head = flow->head;
498 if (!head ||
499 fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
500 if (!head)
501 flow->head = skb;
502 else
503 flow->tail->next = skb;
504 flow->tail = skb;
505 skb->next = NULL;
506 return;
507 }
508
509 p = &flow->t_root.rb_node;
510 parent = NULL;
511
512 while (*p) {
513 parent = *p;
514 aux = rb_to_skb(parent);
515 if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
516 p = &parent->rb_right;
517 else
518 p = &parent->rb_left;
519 }
520 rb_link_node(&skb->rbnode, parent, p);
521 rb_insert_color(&skb->rbnode, &flow->t_root);
522}
523
524static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
525 const struct fq_sched_data *q, u64 now)
526{
527 return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
528}
529
530static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
531 struct sk_buff **to_free)
532{
533 struct fq_sched_data *q = qdisc_priv(sch);
534 struct fq_flow *f;
535 u64 now;
536 u8 band;
537
538 band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
539 if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
540 q->stat_band_drops[band]++;
541 return qdisc_drop(skb, sch, to_free);
542 }
543
544 now = ktime_get_ns();
545 if (!skb->tstamp) {
546 fq_skb_cb(skb)->time_to_send = now;
547 } else {
548 /* Check if packet timestamp is too far in the future. */
549 if (fq_packet_beyond_horizon(skb, q, now)) {
550 if (q->horizon_drop) {
551 q->stat_horizon_drops++;
552 return qdisc_drop(skb, sch, to_free);
553 }
554 q->stat_horizon_caps++;
555 skb->tstamp = now + q->horizon;
556 }
557 fq_skb_cb(skb)->time_to_send = skb->tstamp;
558 }
559
560 f = fq_classify(sch, skb, now);
561
562 if (f != &q->internal) {
563 if (unlikely(f->qlen >= q->flow_plimit)) {
564 q->stat_flows_plimit++;
565 return qdisc_drop(skb, sch, to_free);
566 }
567
568 if (fq_flow_is_detached(f)) {
569 fq_flow_add_tail(q, f, NEW_FLOW);
570 if (time_after(jiffies, f->age + q->flow_refill_delay))
571 f->credit = max_t(u32, f->credit, q->quantum);
572 }
573
574 f->band = band;
575 q->band_pkt_count[band]++;
576 fq_skb_cb(skb)->band = band;
577 if (f->qlen == 0)
578 q->inactive_flows--;
579 }
580
581 f->qlen++;
582 /* Note: this overwrites f->age */
583 flow_queue_add(f, skb);
584
585 qdisc_qstats_backlog_inc(sch, skb);
586 sch->q.qlen++;
587
588 return NET_XMIT_SUCCESS;
589}
590
591static void fq_check_throttled(struct fq_sched_data *q, u64 now)
592{
593 unsigned long sample;
594 struct rb_node *p;
595
596 if (q->time_next_delayed_flow > now)
597 return;
598
599 /* Update unthrottle latency EWMA.
600 * This is cheap and can help diagnosing timer/latency problems.
601 */
602 sample = (unsigned long)(now - q->time_next_delayed_flow);
603 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
604 q->unthrottle_latency_ns += sample >> 3;
605
606 q->time_next_delayed_flow = ~0ULL;
607 while ((p = rb_first(&q->delayed)) != NULL) {
608 struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
609
610 if (f->time_next_packet > now) {
611 q->time_next_delayed_flow = f->time_next_packet;
612 break;
613 }
614 fq_flow_unset_throttled(q, f);
615 }
616}
617
618static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
619{
620 if (pband->credit <= 0)
621 return NULL;
622
623 if (pband->new_flows.first)
624 return &pband->new_flows;
625
626 return pband->old_flows.first ? &pband->old_flows : NULL;
627}
628
629static struct sk_buff *fq_dequeue(struct Qdisc *sch)
630{
631 struct fq_sched_data *q = qdisc_priv(sch);
632 struct fq_perband_flows *pband;
633 struct fq_flow_head *head;
634 struct sk_buff *skb;
635 struct fq_flow *f;
636 unsigned long rate;
637 int retry;
638 u32 plen;
639 u64 now;
640
641 if (!sch->q.qlen)
642 return NULL;
643
644 skb = fq_peek(&q->internal);
645 if (unlikely(skb)) {
646 q->internal.qlen--;
647 fq_dequeue_skb(sch, &q->internal, skb);
648 goto out;
649 }
650
651 now = ktime_get_ns();
652 fq_check_throttled(q, now);
653 retry = 0;
654 pband = &q->band_flows[q->band_nr];
655begin:
656 head = fq_pband_head_select(pband);
657 if (!head) {
658 while (++retry <= FQ_BANDS) {
659 if (++q->band_nr == FQ_BANDS)
660 q->band_nr = 0;
661 pband = &q->band_flows[q->band_nr];
662 pband->credit = min(pband->credit + pband->quantum,
663 pband->quantum);
664 goto begin;
665 }
666 if (q->time_next_delayed_flow != ~0ULL)
667 qdisc_watchdog_schedule_range_ns(&q->watchdog,
668 q->time_next_delayed_flow,
669 q->timer_slack);
670 return NULL;
671 }
672 f = head->first;
673 retry = 0;
674 if (f->credit <= 0) {
675 f->credit += q->quantum;
676 head->first = f->next;
677 fq_flow_add_tail(q, f, OLD_FLOW);
678 goto begin;
679 }
680
681 skb = fq_peek(f);
682 if (skb) {
683 u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
684 f->time_next_packet);
685
686 if (now < time_next_packet) {
687 head->first = f->next;
688 f->time_next_packet = time_next_packet;
689 fq_flow_set_throttled(q, f);
690 goto begin;
691 }
692 prefetch(&skb->end);
693 if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
694 INET_ECN_set_ce(skb);
695 q->stat_ce_mark++;
696 }
697 if (--f->qlen == 0)
698 q->inactive_flows++;
699 q->band_pkt_count[fq_skb_cb(skb)->band]--;
700 fq_dequeue_skb(sch, f, skb);
701 } else {
702 head->first = f->next;
703 /* force a pass through old_flows to prevent starvation */
704 if (head == &pband->new_flows) {
705 fq_flow_add_tail(q, f, OLD_FLOW);
706 } else {
707 fq_flow_set_detached(f);
708 }
709 goto begin;
710 }
711 plen = qdisc_pkt_len(skb);
712 f->credit -= plen;
713 pband->credit -= plen;
714
715 if (!q->rate_enable)
716 goto out;
717
718 rate = q->flow_max_rate;
719
720 /* If EDT time was provided for this skb, we need to
721 * update f->time_next_packet only if this qdisc enforces
722 * a flow max rate.
723 */
724 if (!skb->tstamp) {
725 if (skb->sk)
726 rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);
727
728 if (rate <= q->low_rate_threshold) {
729 f->credit = 0;
730 } else {
731 plen = max(plen, q->quantum);
732 if (f->credit > 0)
733 goto out;
734 }
735 }
736 if (rate != ~0UL) {
737 u64 len = (u64)plen * NSEC_PER_SEC;
738
739 if (likely(rate))
740 len = div64_ul(len, rate);
741 /* Since socket rate can change later,
742 * clamp the delay to 1 second.
743 * Really, providers of too big packets should be fixed !
744 */
745 if (unlikely(len > NSEC_PER_SEC)) {
746 len = NSEC_PER_SEC;
747 q->stat_pkts_too_long++;
748 }
749 /* Account for schedule/timers drifts.
750 * f->time_next_packet was set when prior packet was sent,
751 * and current time (@now) can be too late by tens of us.
752 */
753 if (f->time_next_packet)
754 len -= min(len/2, now - f->time_next_packet);
755 f->time_next_packet = now + len;
756 }
757out:
758 qdisc_bstats_update(sch, skb);
759 return skb;
760}
761
762static void fq_flow_purge(struct fq_flow *flow)
763{
764 struct rb_node *p = rb_first(&flow->t_root);
765
766 while (p) {
767 struct sk_buff *skb = rb_to_skb(p);
768
769 p = rb_next(p);
770 rb_erase(&skb->rbnode, &flow->t_root);
771 rtnl_kfree_skbs(skb, skb);
772 }
773 rtnl_kfree_skbs(flow->head, flow->tail);
774 flow->head = NULL;
775 flow->qlen = 0;
776}
777
778static void fq_reset(struct Qdisc *sch)
779{
780 struct fq_sched_data *q = qdisc_priv(sch);
781 struct rb_root *root;
782 struct rb_node *p;
783 struct fq_flow *f;
784 unsigned int idx;
785
786 sch->q.qlen = 0;
787 sch->qstats.backlog = 0;
788
789 fq_flow_purge(&q->internal);
790
791 if (!q->fq_root)
792 return;
793
794 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
795 root = &q->fq_root[idx];
796 while ((p = rb_first(root)) != NULL) {
797 f = rb_entry(p, struct fq_flow, fq_node);
798 rb_erase(p, root);
799
800 fq_flow_purge(f);
801
802 kmem_cache_free(fq_flow_cachep, f);
803 }
804 }
805 for (idx = 0; idx < FQ_BANDS; idx++) {
806 q->band_flows[idx].new_flows.first = NULL;
807 q->band_flows[idx].old_flows.first = NULL;
808 }
809 q->delayed = RB_ROOT;
810 q->flows = 0;
811 q->inactive_flows = 0;
812 q->throttled_flows = 0;
813}
814
815static void fq_rehash(struct fq_sched_data *q,
816 struct rb_root *old_array, u32 old_log,
817 struct rb_root *new_array, u32 new_log)
818{
819 struct rb_node *op, **np, *parent;
820 struct rb_root *oroot, *nroot;
821 struct fq_flow *of, *nf;
822 int fcnt = 0;
823 u32 idx;
824
825 for (idx = 0; idx < (1U << old_log); idx++) {
826 oroot = &old_array[idx];
827 while ((op = rb_first(oroot)) != NULL) {
828 rb_erase(op, oroot);
829 of = rb_entry(op, struct fq_flow, fq_node);
830 if (fq_gc_candidate(of)) {
831 fcnt++;
832 kmem_cache_free(fq_flow_cachep, of);
833 continue;
834 }
835 nroot = &new_array[hash_ptr(of->sk, new_log)];
836
837 np = &nroot->rb_node;
838 parent = NULL;
839 while (*np) {
840 parent = *np;
841
842 nf = rb_entry(parent, struct fq_flow, fq_node);
843 BUG_ON(nf->sk == of->sk);
844
845 if (nf->sk > of->sk)
846 np = &parent->rb_right;
847 else
848 np = &parent->rb_left;
849 }
850
851 rb_link_node(&of->fq_node, parent, np);
852 rb_insert_color(&of->fq_node, nroot);
853 }
854 }
855 q->flows -= fcnt;
856 q->inactive_flows -= fcnt;
857 q->stat_gc_flows += fcnt;
858}
859
860static void fq_free(void *addr)
861{
862 kvfree(addr);
863}
864
865static int fq_resize(struct Qdisc *sch, u32 log)
866{
867 struct fq_sched_data *q = qdisc_priv(sch);
868 struct rb_root *array;
869 void *old_fq_root;
870 u32 idx;
871
872 if (q->fq_root && log == q->fq_trees_log)
873 return 0;
874
875 /* If XPS was setup, we can allocate memory on right NUMA node */
876 array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
877 netdev_queue_numa_node_read(sch->dev_queue));
878 if (!array)
879 return -ENOMEM;
880
881 for (idx = 0; idx < (1U << log); idx++)
882 array[idx] = RB_ROOT;
883
884 sch_tree_lock(sch);
885
886 old_fq_root = q->fq_root;
887 if (old_fq_root)
888 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
889
890 q->fq_root = array;
891 q->fq_trees_log = log;
892
893 sch_tree_unlock(sch);
894
895 fq_free(old_fq_root);
896
897 return 0;
898}
899
900static const struct netlink_range_validation iq_range = {
901 .max = INT_MAX,
902};
903
904static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
905 [TCA_FQ_UNSPEC] = { .strict_start_type = TCA_FQ_TIMER_SLACK },
906
907 [TCA_FQ_PLIMIT] = { .type = NLA_U32 },
908 [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
909 [TCA_FQ_QUANTUM] = { .type = NLA_U32 },
910 [TCA_FQ_INITIAL_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
911 [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
912 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
913 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
914 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
915 [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
916 [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
917 [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
918 [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
919 [TCA_FQ_TIMER_SLACK] = { .type = NLA_U32 },
920 [TCA_FQ_HORIZON] = { .type = NLA_U32 },
921 [TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
922 [TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
923 [TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
924};
925
926/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
927static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
928{
929 const int num_elems = TC_PRIO_MAX + 1;
930 int i;
931
932 memset(out, 0, num_elems / 4);
933 for (i = 0; i < num_elems; i++)
934 out[i / 4] |= in[i] << (2 * (i & 0x3));
935}
936
937static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
938{
939 const int num_elems = TC_PRIO_MAX + 1;
940 int i;
941
942 for (i = 0; i < num_elems; i++)
943 out[i] = fq_prio2band(in, i);
944}
945
946static int fq_load_weights(struct fq_sched_data *q,
947 const struct nlattr *attr,
948 struct netlink_ext_ack *extack)
949{
950 s32 *weights = nla_data(attr);
951 int i;
952
953 for (i = 0; i < FQ_BANDS; i++) {
954 if (weights[i] < FQ_MIN_WEIGHT) {
955 NL_SET_ERR_MSG_FMT_MOD(extack, "Weight %d less that minimum allowed %d",
956 weights[i], FQ_MIN_WEIGHT);
957 return -EINVAL;
958 }
959 }
960 for (i = 0; i < FQ_BANDS; i++)
961 q->band_flows[i].quantum = weights[i];
962 return 0;
963}
964
965static int fq_load_priomap(struct fq_sched_data *q,
966 const struct nlattr *attr,
967 struct netlink_ext_ack *extack)
968{
969 const struct tc_prio_qopt *map = nla_data(attr);
970 int i;
971
972 if (map->bands != FQ_BANDS) {
973 NL_SET_ERR_MSG_MOD(extack, "FQ only supports 3 bands");
974 return -EINVAL;
975 }
976 for (i = 0; i < TC_PRIO_MAX + 1; i++) {
977 if (map->priomap[i] >= FQ_BANDS) {
978 NL_SET_ERR_MSG_FMT_MOD(extack, "FQ priomap field %d maps to a too high band %d",
979 i, map->priomap[i]);
980 return -EINVAL;
981 }
982 }
983 fq_prio2band_compress_crumb(map->priomap, q->prio2band);
984 return 0;
985}
986
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned int drop_len = 0;
	u32 fq_log;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256 * 1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0 && quantum <= (1 << 20)) {
			q->quantum = quantum;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			err = -EINVAL;
		}
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (!err && tb[TCA_FQ_PRIOMAP])
		err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);

	if (!err && tb[TCA_FQ_WEIGHTS])
		err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (tb[TCA_FQ_CE_THRESHOLD])
		q->ce_threshold = (u64)NSEC_PER_USEC *
				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

	if (tb[TCA_FQ_TIMER_SLACK])
		q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]);

	if (tb[TCA_FQ_HORIZON])
		q->horizon = (u64)NSEC_PER_USEC *
			     nla_get_u32(tb[TCA_FQ_HORIZON]);

	if (tb[TCA_FQ_HORIZON_DROP])
		q->horizon_drop = nla_get_u8(tb[TCA_FQ_HORIZON_DROP]);

	if (!err) {
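		/* fq_resize() allocates the new hash table with GFP_KERNEL,
		 * so it must not run under the qdisc tree lock.
		 */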
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

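/* Install default parameters : a bare "tc qdisc add dev eth0 root fq"
 * (device name illustrative) runs with the values below.
 */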
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int i, err;

	sch->limit = 10000;
	q->flow_plimit = 100;
	q->quantum = 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay = msecs_to_jiffies(40);
	q->flow_max_rate = ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable = 1;
	for (i = 0; i < FQ_BANDS; i++) {
		q->band_flows[i].new_flows.first = NULL;
		q->band_flows[i].old_flows.first = NULL;
	}
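	/* Default band weights, a 9:3:1 ratio (scaled by 2^16) */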
	q->band_flows[0].quantum = 9 << 16;
	q->band_flows[1].quantum = 3 << 16;
	q->band_flows[2].quantum = 1 << 16;
	q->delayed = RB_ROOT;
	q->fq_root = NULL;
	q->fq_trees_log = ilog2(1024);
	q->orphan_mask = 1024 - 1;
	q->low_rate_threshold = 550000 / 8;

	q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */

	q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
	q->horizon_drop = 1; /* by default, drop packets beyond horizon */

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;

	fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 ce_threshold = q->ce_threshold;
	struct tc_prio_qopt prio = {
		.bands = FQ_BANDS,
	};
	u64 horizon = q->horizon;
	struct nlattr *opts;
	s32 weights[FQ_BANDS];

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

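	/* ce_threshold and horizon are stored in nanoseconds,
	 * but dumped in microseconds.
	 */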
	do_div(ce_threshold, NSEC_PER_USEC);
	do_div(horizon, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) ||
	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack) ||
	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop))
		goto nla_put_failure;

	fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
	if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
		goto nla_put_failure;

	weights[0] = q->band_flows[0].quantum;
	weights[1] = q->band_flows[1].quantum;
	weights[2] = q->band_flows[2].quantum;
	if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;
	int i;

	st.pad = 0;

	sch_tree_lock(sch);

	st.gc_flows = q->stat_gc_flows;
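	/* highprio_packets and tcp_retrans are no longer updated,
	 * but kept so the dumped structure layout does not change.
	 */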
	st.highprio_packets = 0;
	st.fastpath_packets = q->internal.stat_fastpath_packets;
	st.tcp_retrans = 0;
	st.throttled = q->stat_throttled;
	st.flows_plimit = q->stat_flows_plimit;
	st.pkts_too_long = q->stat_pkts_too_long;
	st.allocation_errors = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
				    ktime_get_ns();
	st.flows = q->flows;
	st.inactive_flows = q->inactive_flows;
	st.throttled_flows = q->throttled_flows;
	st.unthrottle_latency_ns = min_t(unsigned long,
					 q->unthrottle_latency_ns, ~0U);
	st.ce_mark = q->stat_ce_mark;
	st.horizon_drops = q->stat_horizon_drops;
	st.horizon_caps = q->stat_horizon_caps;
	for (i = 0; i < FQ_BANDS; i++) {
		st.band_drops[i] = q->stat_band_drops[i];
		st.band_pkt_count[i] = q->band_pkt_count[i];
	}
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		= "fq",
	.priv_size	= sizeof(struct fq_sched_data),

	.enqueue	= fq_enqueue,
	.dequeue	= fq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_init,
	.reset		= fq_reset,
	.destroy	= fq_destroy,
	.change		= fq_change,
	.dump		= fq_dump,
	.dump_stats	= fq_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

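	/* On registration failure, destroy the cache to avoid leaking it */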
	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue Packet Scheduler");