// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_api.c  Packet scheduler API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the
   times determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to make some sanity
   checks and do the part of the work which is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty, it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   a real packet queue, but q->q.qlen must nonetheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code.
   NET_XMIT_DROP - this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN - probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
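
/*
 * Illustrative sketch (not part of the original file): a minimal
 * tail-drop FIFO that honours the enqueue/dequeue contract described
 * above. All names here are hypothetical; real implementations live in
 * sch_fifo.c and friends. Kept inside #if 0 so it is never built.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
{
        if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
                return qdisc_enqueue_tail(skb, sch);

        /* Tail drop: NET_XMIT_DROP tells the caller this skb was lost. */
        return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        /* Returning NULL is legal even when q.qlen > 0 (see above). */
        return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
        .id             = "example",
        .enqueue        = example_enqueue,
        .dequeue        = example_dequeue,
        .peek           = qdisc_peek_head,
        .owner          = THIS_MODULE,
};
#endif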

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *      Queueing disciplines manipulation.      *
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int rc = -EEXIST;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (!strcmp(qops->id, q->id))
                        goto out;

        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->peek == NULL) {
                if (qops->dequeue == NULL)
                        qops->peek = noop_qdisc_ops.peek;
                else
                        goto out_einval;
        }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;

        if (qops->cl_ops) {
                const struct Qdisc_class_ops *cops = qops->cl_ops;

                if (!(cops->find && cops->walk && cops->leaf))
                        goto out_einval;

                if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
                        goto out_einval;
        }

        qops->next = NULL;
        *qp = qops;
        rc = 0;
out:
        write_unlock(&qdisc_mod_lock);
        return rc;

out_einval:
        rc = -EINVAL;
        goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int err = -ENOENT;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (q == qops)
                        break;
        if (q) {
                *qp = q->next;
                q->next = NULL;
                err = 0;
        }
        write_unlock(&qdisc_mod_lock);
        return err;
}
EXPORT_SYMBOL(unregister_qdisc);
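
/*
 * Illustrative sketch (not in the original file): the usual module
 * boilerplate for an out-of-tree qdisc registering its ops with this
 * frontend; "example_qdisc_ops" is the hypothetical structure sketched
 * earlier.
 */
#if 0
static int __init example_module_init(void)
{
        return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
        unregister_qdisc(&example_qdisc_ops);
}
module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif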

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
        read_lock(&qdisc_mod_lock);
        strlcpy(name, default_qdisc_ops->id, len);
        read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
        struct Qdisc_ops *q = NULL;

        for (q = qdisc_base; q; q = q->next) {
                if (!strcmp(name, q->id)) {
                        if (!try_module_get(q->owner))
                                q = NULL;
                        break;
                }
        }

        return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
        const struct Qdisc_ops *ops;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        write_lock(&qdisc_mod_lock);
        ops = qdisc_lookup_default(name);
        if (!ops) {
                /* Not found, drop lock and try to load module */
                write_unlock(&qdisc_mod_lock);
                request_module("sch_%s", name);
                write_lock(&qdisc_mod_lock);

                ops = qdisc_lookup_default(name);
        }

        if (ops) {
                /* Set new default */
                module_put(default_qdisc_ops->owner);
                default_qdisc_ops = ops;
        }
        write_unlock(&qdisc_mod_lock);

        return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
        return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif

/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
        struct Qdisc *q;

        if (!qdisc_dev(root))
                return (root->handle == handle ? root : NULL);

        if (!(root->flags & TCQ_F_BUILTIN) &&
            root->handle == handle)
                return root;

        hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
                if (q->handle == handle)
                        return q;
        }
        return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
                ASSERT_RTNL();
                hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
                if (invisible)
                        q->flags |= TCQ_F_INVISIBLE;
        }
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
                ASSERT_RTNL();
                hash_del_rcu(&q->hash);
        }
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
        struct Qdisc *q;

        if (!handle)
                return NULL;
        q = qdisc_match_from_root(dev->qdisc, handle);
        if (q)
                goto out;

        if (dev_ingress_queue(dev))
                q = qdisc_match_from_root(
                        dev_ingress_queue(dev)->qdisc_sleeping,
                        handle);
out:
        return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
        struct netdev_queue *nq;
        struct Qdisc *q;

        if (!handle)
                return NULL;
        q = qdisc_match_from_root(dev->qdisc, handle);
        if (q)
                goto out;

        nq = dev_ingress_queue_rcu(dev);
        if (nq)
                q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
        return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
        unsigned long cl;
        const struct Qdisc_class_ops *cops = p->ops->cl_ops;

        if (cops == NULL)
                return NULL;
        cl = cops->find(p, classid);

        if (cl == 0)
                return NULL;
        return cops->leaf(p, cl);
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
        struct Qdisc_ops *q = NULL;

        if (kind) {
                read_lock(&qdisc_mod_lock);
                for (q = qdisc_base; q; q = q->next) {
                        if (nla_strcmp(kind, q->id) == 0) {
                                if (!try_module_get(q->owner))
                                        q = NULL;
                                break;
                        }
                }
                read_unlock(&qdisc_mod_lock);
        }
        return q;
}

/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48 byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
        int low       = roundup(r->mpu, 48);
        int high      = roundup(low+1, 48);
        int cell_low  = low >> r->cell_log;
        int cell_high = (high >> r->cell_log) - 1;

        /* rtab is too inaccurate at rates > 100Mbit/s */
        if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
                pr_debug("TC linklayer: Giving up ATM detection\n");
                return TC_LINKLAYER_ETHERNET;
        }

        if ((cell_high > cell_low) && (cell_high < 256)
            && (rtab[cell_low] == rtab[cell_high])) {
                pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
                         cell_low, cell_high, rtab[cell_high]);
                return TC_LINKLAYER_ATM;
        }
        return TC_LINKLAYER_ETHERNET;
}
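
/*
 * Worked example (added for illustration; assumes the usual iproute2
 * convention that rtab slot i holds the transmit time for sizes up to
 * (i + 1) << cell_log): with cell_log = 3 and mpu = 96, low = 96 and
 * high = roundup(97, 48) = 144, giving cell_low = 12 and cell_high = 17.
 * Sizes 104 and 144 both occupy three 48-byte ATM cells, so an
 * ATM-aligned table has rtab[12] == rtab[17] and TC_LINKLAYER_ATM is
 * returned; a plain Ethernet table would differ at those slots.
 */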

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                                        struct nlattr *tab,
                                        struct netlink_ext_ack *extack)
{
        struct qdisc_rate_table *rtab;

        if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
            nla_len(tab) != TC_RTAB_SIZE) {
                NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
        }

        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
                    !memcmp(&rtab->data, nla_data(tab), 1024)) {
                        rtab->refcnt++;
                        return rtab;
                }
        }

        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
        if (rtab) {
                rtab->rate = *r;
                rtab->refcnt = 1;
                memcpy(rtab->data, nla_data(tab), 1024);
                if (r->linklayer == TC_LINKLAYER_UNAWARE)
                        r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
        } else {
                NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
        }
        return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
        struct qdisc_rate_table *rtab, **rtabp;

        if (!tab || --tab->refcnt)
                return;

        for (rtabp = &qdisc_rtab_list;
             (rtab = *rtabp) != NULL;
             rtabp = &rtab->next) {
                if (rtab == tab) {
                        *rtabp = rtab->next;
                        kfree(rtab);
                        return;
                }
        }
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
        [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
        [TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
                                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_STAB_MAX + 1];
        struct qdisc_size_table *stab;
        struct tc_sizespec *s;
        unsigned int tsize = 0;
        u16 *tab = NULL;
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
                                          extack);
        if (err < 0)
                return ERR_PTR(err);
        if (!tb[TCA_STAB_BASE]) {
                NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
                return ERR_PTR(-EINVAL);
        }

        s = nla_data(tb[TCA_STAB_BASE]);

        if (s->tsize > 0) {
                if (!tb[TCA_STAB_DATA]) {
                        NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
                        return ERR_PTR(-EINVAL);
                }
                tab = nla_data(tb[TCA_STAB_DATA]);
                tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
        }

        if (tsize != s->tsize || (!tab && tsize > 0)) {
                NL_SET_ERR_MSG(extack, "Invalid size of size table");
                return ERR_PTR(-EINVAL);
        }

        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
                        continue;
                if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
                        continue;
                stab->refcnt++;
                return stab;
        }

        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
        if (!stab)
                return ERR_PTR(-ENOMEM);

        stab->refcnt = 1;
        stab->szopts = *s;
        if (tsize > 0)
                memcpy(stab->data, tab, tsize * sizeof(u16));

        list_add_tail(&stab->list, &qdisc_stab_list);

        return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
        if (!tab)
                return;

        if (--tab->refcnt == 0) {
                list_del(&tab->list);
                kfree_rcu(tab, rcu);
        }
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_STAB);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

        return skb->len;

nla_put_failure:
        return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab)
{
        int pkt_len, slot;

        pkt_len = skb->len + stab->szopts.overhead;
        if (unlikely(!stab->szopts.tsize))
                goto out;

        slot = pkt_len + stab->szopts.cell_align;
        if (unlikely(slot < 0))
                slot = 0;

        slot >>= stab->szopts.cell_log;
        if (likely(slot < stab->szopts.tsize))
                pkt_len = stab->data[slot];
        else
                pkt_len = stab->data[stab->szopts.tsize - 1] *
                                (slot / stab->szopts.tsize) +
                                stab->data[slot % stab->szopts.tsize];

        pkt_len <<= stab->szopts.size_log;
out:
        if (unlikely(pkt_len < 1))
                pkt_len = 1;
        qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
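
/*
 * Worked example (added for illustration): with overhead = 24,
 * cell_align = -1, cell_log = 6, size_log = 0 and tsize = 512, a
 * 100-byte skb yields pkt_len = 124 and slot = (124 - 1) >> 6 = 1, so
 * the accounted length becomes stab->data[1]. This is how per-cell
 * padding (e.g. ATM) is folded into qdisc byte accounting.
 */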

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
        if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
                pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
                        txt, qdisc->ops->id, qdisc->handle >> 16);
                qdisc->flags |= TCQ_F_WARN_NONWC;
        }
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);

        rcu_read_lock();
        __netif_schedule(qdisc_root(wd->qdisc));
        rcu_read_unlock();

        return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
                                 clockid_t clockid)
{
        hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
        wd->timer.function = qdisc_watchdog;
        wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
        qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;

        if (wd->last_expires == expires)
                return;

        wd->last_expires = expires;
        hrtimer_start(&wd->timer,
                      ns_to_ktime(expires),
                      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
        hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
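
/*
 * Illustrative sketch (not in the original file): how a shaping qdisc
 * typically uses the watchdog. When ->dequeue() decides the head packet
 * is not yet allowed to go out, it arms the watchdog for the earliest
 * send time; the hrtimer then reschedules the root qdisc. All names
 * below are hypothetical.
 */
#if 0
struct example_shaper_data {
        struct qdisc_watchdog   watchdog;       /* armed from ->dequeue() */
        u64                     next_send_time; /* ns, filled by the shaper */
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
        struct example_shaper_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();

        if (!qdisc_peek_head(sch))
                return NULL;

        if (q->next_send_time > now) {
                /* Too early: wake up when the packet becomes eligible. */
                qdisc_watchdog_schedule_ns(&q->watchdog, q->next_send_time);
                return NULL;
        }
        return qdisc_dequeue_head(sch);
}
#endif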

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
        struct hlist_head *h;
        unsigned int i;

        h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

        if (h != NULL) {
                for (i = 0; i < n; i++)
                        INIT_HLIST_HEAD(&h[i]);
        }
        return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *next;
        struct hlist_head *nhash, *ohash;
        unsigned int nsize, nmask, osize;
        unsigned int i, h;

        /* Rehash when load factor exceeds 0.75 */
        if (clhash->hashelems * 4 <= clhash->hashsize * 3)
                return;
        nsize = clhash->hashsize * 2;
        nmask = nsize - 1;
        nhash = qdisc_class_hash_alloc(nsize);
        if (nhash == NULL)
                return;

        ohash = clhash->hash;
        osize = clhash->hashsize;

        sch_tree_lock(sch);
        for (i = 0; i < osize; i++) {
                hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
                        h = qdisc_class_hash(cl->classid, nmask);
                        hlist_add_head(&cl->hnode, &nhash[h]);
                }
        }
        clhash->hash = nhash;
        clhash->hashsize = nsize;
        clhash->hashmask = nmask;
        sch_tree_unlock(sch);

        kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
        unsigned int size = 4;

        clhash->hash = qdisc_class_hash_alloc(size);
        if (!clhash->hash)
                return -ENOMEM;
        clhash->hashsize = size;
        clhash->hashmask = size - 1;
        clhash->hashelems = 0;
        return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
        kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        unsigned int h;

        INIT_HLIST_NODE(&cl->hnode);
        h = qdisc_class_hash(cl->classid, clhash->hashmask);
        hlist_add_head(&cl->hnode, &clhash->hash[h]);
        clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        hlist_del(&cl->hnode);
        clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
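
/*
 * Illustrative sketch (not in the original file): classful qdiscs embed
 * struct Qdisc_class_common in their per-class state and use the hash
 * helpers above, typically calling qdisc_class_hash_grow() after each
 * insertion. Names are hypothetical; see sch_htb.c or sch_hfsc.c for
 * real users.
 */
#if 0
struct example_class {
        struct Qdisc_class_common common;       /* classid + hash linkage */
        /* ... per-class scheduling state ... */
};

struct example_classful_data {
        struct Qdisc_class_hash clhash;
};

static void example_class_attach(struct Qdisc *sch, struct example_class *cl,
                                 u32 classid)
{
        struct example_classful_data *q = qdisc_priv(sch);

        cl->common.classid = classid;
        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);
        qdisc_class_hash_grow(sch, &q->clhash);
}
#endif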

/* Allocate a unique handle from the space managed by the kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
        int i = 0x8000;
        static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

        do {
                autohandle += TC_H_MAKE(0x10000U, 0);
                if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
                        autohandle = TC_H_MAKE(0x80000000U, 0);
                if (!qdisc_lookup(dev, autohandle))
                        return autohandle;
                cond_resched();
        } while (--i > 0);

        return 0;
}
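
/*
 * Note (added for illustration): a handle is a 32-bit major:minor pair;
 * TC_H_MAKE(0x80010000, 0) is what tc prints as "8001:". The loop above
 * steps the major number through 0x8001..0xffff (wrapping back to
 * 0x8000), so at most 0x8000 autogenerated handles can coexist on one
 * device.
 */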

void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
        bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
        bool notify;
        int drops;

        if (n == 0 && len == 0)
                return;
        drops = max_t(int, n, 0);
        rcu_read_lock();
        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        break;

                if (sch->flags & TCQ_F_NOPARENT)
                        break;
                /* Notify parent qdisc only if child qdisc becomes empty.
                 *
                 * If child was empty even before update then backlog
                 * counter is screwed and we skip notification because
                 * parent class is already passive.
                 *
                 * If the original child was offloaded then it is allowed
                 * to be seen as empty, so the parent is notified anyway.
                 */
                notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
                                                       !qdisc_is_offloaded);
                /* TODO: perform the search on a per txq basis */
                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
                        WARN_ON_ONCE(parentid != TC_H_ROOT);
                        break;
                }
                cops = sch->ops->cl_ops;
                if (notify && cops->qlen_notify) {
                        cl = cops->find(sch, parentid);
                        cops->qlen_notify(sch, cl);
                }
                sch->q.qlen -= n;
                sch->qstats.backlog -= len;
                __qdisc_qstats_drop(sch, drops);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
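
/*
 * Illustrative sketch (not in the original file): the typical caller is
 * a qdisc that drops queued packets outside of the normal dequeue path,
 * e.g. while shrinking its queue after ->change() lowered the limit:
 *
 *      unsigned int dropped = 0, dropped_len = 0;
 *
 *      while (sch->q.qlen > new_limit) {       // new_limit: hypothetical
 *              struct sk_buff *skb = qdisc_dequeue_head(sch);
 *
 *              dropped++;
 *              dropped_len += qdisc_pkt_len(skb);
 *              rtnl_kfree_skbs(skb, skb);
 *      }
 *      qdisc_tree_reduce_backlog(sch, dropped, dropped_len);
 *
 * so every ancestor's qlen/backlog stays consistent with the tree.
 */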

int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
                              void *type_data)
{
        struct net_device *dev = qdisc_dev(sch);
        int err;

        sch->flags &= ~TCQ_F_OFFLOADED;
        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return 0;

        err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
        if (err == -EOPNOTSUPP)
                return 0;

        if (!err)
                sch->flags |= TCQ_F_OFFLOADED;

        return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);

void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                                struct Qdisc *new, struct Qdisc *old,
                                enum tc_setup_type type, void *type_data,
                                struct netlink_ext_ack *extack)
{
        bool any_qdisc_is_offloaded;
        int err;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

        /* Don't report error if the graft is part of destroy operation. */
        if (!err || !new || new == &noop_qdisc)
                return;

        /* Don't report error if the parent, the old child and the new
         * one are not offloaded.
         */
        any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
        any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
        any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

        if (any_qdisc_is_offloaded)
                NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);

static void qdisc_offload_graft_root(struct net_device *dev,
                                     struct Qdisc *new, struct Qdisc *old,
                                     struct netlink_ext_ack *extack)
{
        struct tc_root_qopt_offload graft_offload = {
                .command        = TC_ROOT_GRAFT,
                .handle         = new ? new->handle : 0,
                .ingress        = (new && new->flags & TCQ_F_INGRESS) ||
                                  (old && old->flags & TCQ_F_INGRESS),
        };

        qdisc_offload_graft_helper(dev, NULL, new, old,
                                   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                         u32 portid, u32 seq, u16 flags, int event)
{
        struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
        struct gnet_stats_queue __percpu *cpu_qstats = NULL;
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
        struct qdisc_size_table *stab;
        u32 block_index;
        __u32 qlen;

        cond_resched();
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = refcount_read(&q->refcnt);
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
        if (q->ops->ingress_block_get) {
                block_index = q->ops->ingress_block_get(q);
                if (block_index &&
                    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
                        goto nla_put_failure;
        }
        if (q->ops->egress_block_get) {
                block_index = q->ops->egress_block_get(q);
                if (block_index &&
                    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
                        goto nla_put_failure;
        }
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
                goto nla_put_failure;
        qlen = qdisc_qlen_sum(q);

        stab = rtnl_dereference(q->stab);
        if (stab && qdisc_dump_stab(skb, stab) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
                                         NULL, &d, TCA_PAD) < 0)
                goto nla_put_failure;

        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
                goto nla_put_failure;

        if (qdisc_is_percpu_stats(q)) {
                cpu_bstats = q->cpu_bstats;
                cpu_qstats = q->cpu_qstats;
        }

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
                                  &d, cpu_bstats, &q->bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
            gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
        if (q->flags & TCQ_F_BUILTIN)
                return true;
        if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
                return true;

        return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                        struct nlmsghdr *n, u32 clid,
                        struct Qdisc *old, struct Qdisc *new)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (old && !tc_qdisc_dump_ignore(old, false)) {
                if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
                                  0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new && !tc_qdisc_dump_ignore(new, false)) {
                if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
                                  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }

        if (skb->len)
                return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                      n->nlmsg_flags & NLM_F_ECHO);

err_out:
        kfree_skb(skb);
        return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                               struct nlmsghdr *n, u32 clid,
                               struct Qdisc *old, struct Qdisc *new)
{
        if (new || old)
                qdisc_notify(net, skb, n, clid, old, new);

        if (old)
                qdisc_put(old);
}

static void qdisc_clear_nolock(struct Qdisc *sch)
{
        sch->flags &= ~TCQ_F_NOLOCK;
        if (!(sch->flags & TCQ_F_CPUSTATS))
                return;

        free_percpu(sch->cpu_bstats);
        free_percpu(sch->cpu_qstats);
        sch->cpu_bstats = NULL;
        sch->cpu_qstats = NULL;
        sch->flags &= ~TCQ_F_CPUSTATS;
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
                       struct Qdisc *new, struct Qdisc *old,
                       struct netlink_ext_ack *extack)
{
        struct Qdisc *q = old;
        struct net *net = dev_net(dev);

        if (parent == NULL) {
                unsigned int i, num_q, ingress;

                ingress = 0;
                num_q = dev->num_tx_queues;
                if ((q && q->flags & TCQ_F_INGRESS) ||
                    (new && new->flags & TCQ_F_INGRESS)) {
                        num_q = 1;
                        ingress = 1;
                        if (!dev_ingress_queue(dev)) {
                                NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
                                return -ENOENT;
                        }
                }

                if (dev->flags & IFF_UP)
                        dev_deactivate(dev);

                qdisc_offload_graft_root(dev, new, old, extack);

                if (new && new->ops->attach)
                        goto skip;

                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);

                        if (!ingress)
                                dev_queue = netdev_get_tx_queue(dev, i);

                        old = dev_graft_qdisc(dev_queue, new);
                        if (new && i > 0)
                                qdisc_refcount_inc(new);

                        if (!ingress)
                                qdisc_put(old);
                }

skip:
                if (!ingress) {
                        notify_and_destroy(net, skb, n, classid,
                                           dev->qdisc, new);
                        if (new && !new->ops->attach)
                                qdisc_refcount_inc(new);
                        dev->qdisc = new ? : &noop_qdisc;

                        if (new && new->ops->attach)
                                new->ops->attach(new);
                } else {
                        notify_and_destroy(net, skb, n, classid, old, new);
                }

                if (dev->flags & IFF_UP)
                        dev_activate(dev);
        } else {
                const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
                unsigned long cl;
                int err;

                /* Only support running class lockless if parent is lockless */
                if (new && (new->flags & TCQ_F_NOLOCK) &&
                    parent && !(parent->flags & TCQ_F_NOLOCK))
                        qdisc_clear_nolock(new);

                if (!cops || !cops->graft)
                        return -EOPNOTSUPP;

                cl = cops->find(parent, classid);
                if (!cl) {
                        NL_SET_ERR_MSG(extack, "Specified class not found");
                        return -ENOENT;
                }

                err = cops->graft(parent, cl, new, &old, extack);
                if (err)
                        return err;
                notify_and_destroy(net, skb, n, classid, old, new);
        }
        return 0;
}

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
                                   struct netlink_ext_ack *extack)
{
        u32 block_index;

        if (tca[TCA_INGRESS_BLOCK]) {
                block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

                if (!block_index) {
                        NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
                        return -EINVAL;
                }
                if (!sch->ops->ingress_block_set) {
                        NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
                        return -EOPNOTSUPP;
                }
                sch->ops->ingress_block_set(sch, block_index);
        }
        if (tca[TCA_EGRESS_BLOCK]) {
                block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

                if (!block_index) {
                        NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
                        return -EINVAL;
                }
                if (!sch->ops->egress_block_set) {
                        NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
                        return -EOPNOTSUPP;
                }
                sch->ops->egress_block_set(sch, block_index);
        }
        return 0;
}

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
                                  struct netdev_queue *dev_queue,
                                  struct Qdisc *p, u32 parent, u32 handle,
                                  struct nlattr **tca, int *errp,
                                  struct netlink_ext_ack *extack)
{
        int err;
        struct nlattr *kind = tca[TCA_KIND];
        struct Qdisc *sch;
        struct Qdisc_ops *ops;
        struct qdisc_size_table *stab;

        ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
        if (ops == NULL && kind != NULL) {
                char name[IFNAMSIZ];

                if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
                        /* We dropped the RTNL semaphore in order to
                         * perform the module load. So, even if we
                         * succeeded in loading the module we have to
                         * tell the caller to replay the request. We
                         * indicate this using -EAGAIN.
                         * We replay the request because the device may
                         * go away in the mean time.
                         */
                        rtnl_unlock();
                        request_module("sch_%s", name);
                        rtnl_lock();
                        ops = qdisc_lookup_ops(kind);
                        if (ops != NULL) {
                                /* We will try again with qdisc_lookup_ops,
                                 * so don't keep a reference.
                                 */
                                module_put(ops->owner);
                                err = -EAGAIN;
                                goto err_out;
                        }
                }
        }
#endif

        err = -ENOENT;
        if (!ops) {
                NL_SET_ERR_MSG(extack, "Specified qdisc not found");
                goto err_out;
        }

        sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
        }

        sch->parent = parent;

        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
        } else {
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
                        if (handle == 0) {
                                NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
                                err = -ENOSPC;
                                goto err_out3;
                        }
                }
                if (!netif_is_multiqueue(dev))
                        sch->flags |= TCQ_F_ONETXQUEUE;
        }

        sch->handle = handle;

        /* This exists to keep backward compatibility with a userspace
         * loophole, which allowed userspace to get the IFF_NO_QUEUE
         * facility on older kernels by setting tx_queue_len=0 (prior
         * to qdisc init), and then forgetting to reinit tx_queue_len
         * before again attaching a qdisc.
         */
        if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
                dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
                netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
        }

        err = qdisc_block_indexes_set(sch, tca, extack);
        if (err)
                goto err_out3;

        if (ops->init) {
                err = ops->init(sch, tca[TCA_OPTIONS], extack);
                if (err != 0)
                        goto err_out5;
        }

        if (tca[TCA_STAB]) {
                stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab)) {
                        err = PTR_ERR(stab);
                        goto err_out4;
                }
                rcu_assign_pointer(sch->stab, stab);
        }
        if (tca[TCA_RATE]) {
                seqcount_t *running;

                err = -EOPNOTSUPP;
                if (sch->flags & TCQ_F_MQROOT) {
                        NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
                        goto err_out4;
                }

                if (sch->parent != TC_H_ROOT &&
                    !(sch->flags & TCQ_F_INGRESS) &&
                    (!p || !(p->flags & TCQ_F_MQROOT)))
                        running = qdisc_root_sleeping_running(sch);
                else
                        running = &sch->running;

                err = gen_new_estimator(&sch->bstats,
                                        sch->cpu_bstats,
                                        &sch->rate_est,
                                        NULL,
                                        running,
                                        tca[TCA_RATE]);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
                        goto err_out4;
                }
        }

        qdisc_hash_add(sch, false);

        return sch;

err_out5:
        /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
        if (ops->destroy)
                ops->destroy(sch);
err_out3:
        dev_put(dev);
        qdisc_free(sch);
err_out2:
        module_put(ops->owner);
err_out:
        *errp = err;
        return NULL;

err_out4:
        /*
         * Any broken qdiscs that would require a ops->reset() here?
         * The qdisc was never in action so it shouldn't be necessary.
         */
        qdisc_put_stab(rtnl_dereference(sch->stab));
        if (ops->destroy)
                ops->destroy(sch);
        goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
                        struct netlink_ext_ack *extack)
{
        struct qdisc_size_table *ostab, *stab = NULL;
        int err = 0;

        if (tca[TCA_OPTIONS]) {
                if (!sch->ops->change) {
                        NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
                        return -EINVAL;
                }
                if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
                        NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
                        return -EOPNOTSUPP;
                }
                err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
                if (err)
                        return err;
        }

        if (tca[TCA_STAB]) {
                stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab))
                        return PTR_ERR(stab);
        }

        ostab = rtnl_dereference(sch->stab);
        rcu_assign_pointer(sch->stab, stab);
        qdisc_put_stab(ostab);

        if (tca[TCA_RATE]) {
                /* NB: ignores errors from replace_estimator
                   because change can't be undone. */
                if (sch->flags & TCQ_F_MQROOT)
                        goto out;
                gen_replace_estimator(&sch->bstats,
                                      sch->cpu_bstats,
                                      &sch->rate_est,
                                      NULL,
                                      qdisc_root_sleeping_running(sch),
                                      tca[TCA_RATE]);
        }
out:
        return 0;
}

struct check_loop_arg {
        struct qdisc_walker     w;
        struct Qdisc            *p;
        int                     depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
                         struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
        struct check_loop_arg arg;

        if (q->ops->cl_ops == NULL)
                return 0;

        arg.w.stop = arg.w.skip = arg.w.count = 0;
        arg.w.fn = check_loop_fn;
        arg.depth = depth;
        arg.p = p;
        q->ops->cl_ops->walk(q, &arg.w);
        return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
        struct Qdisc *leaf;
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct check_loop_arg *arg = (struct check_loop_arg *)w;

        leaf = cops->leaf(q, cl);
        if (leaf) {
                if (leaf == arg->p || arg->depth > 7)
                        return -ELOOP;
                return check_loop(leaf, arg->p, arg->depth + 1);
        }
        return 0;
}

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
        [TCA_KIND]              = { .type = NLA_STRING },
        [TCA_RATE]              = { .type = NLA_BINARY,
                                    .len = sizeof(struct tc_estimator) },
        [TCA_STAB]              = { .type = NLA_NESTED },
        [TCA_DUMP_INVISIBLE]    = { .type = NLA_FLAG },
        [TCA_CHAIN]             = { .type = NLA_U32 },
        [TCA_INGRESS_BLOCK]     = { .type = NLA_U32 },
        [TCA_EGRESS_BLOCK]      = { .type = NLA_U32 },
};

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        u32 clid;
        struct Qdisc *q = NULL;
        struct Qdisc *p = NULL;
        int err;

        if ((n->nlmsg_type != RTM_GETQDISC) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
                                     rtm_tca_policy, extack);
        if (err < 0)
                return err;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return -ENODEV;

        clid = tcm->tcm_parent;
        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
                                if (!p) {
                                        NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
                                        return -ENOENT;
                                }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = dev->qdisc;
                }
                if (!q) {
                        NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
                        return -ENOENT;
                }

                if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
                        NL_SET_ERR_MSG(extack, "Invalid handle");
                        return -EINVAL;
                }
        } else {
                q = qdisc_lookup(dev, tcm->tcm_handle);
                if (!q) {
                        NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
                        return -ENOENT;
                }
        }

        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
                NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
        }

        if (n->nlmsg_type == RTM_DELQDISC) {
                if (!clid) {
                        NL_SET_ERR_MSG(extack, "Classid cannot be zero");
                        return -EINVAL;
                }
                if (q->handle == 0) {
                        NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
                        return -ENOENT;
                }
                err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
                if (err != 0)
                        return err;
        } else {
                qdisc_notify(net, skb, n, clid, NULL, q);
        }
        return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                           struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm;
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        u32 clid;
        struct Qdisc *q, *p;
        int err;

        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

replay:
        /* Reinit, just in case something touches this. */
        err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
                                     rtm_tca_policy, extack);
        if (err < 0)
                return err;

        tcm = nlmsg_data(n);
        clid = tcm->tcm_parent;
        q = p = NULL;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return -ENODEV;

        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (clid != TC_H_INGRESS) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
                                if (!p) {
                                        NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
                                        return -ENOENT;
                                }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue_create(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = dev->qdisc;
                }

                /* It may be default qdisc, ignore it */
                if (q && q->handle == 0)
                        q = NULL;

                if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                        if (tcm->tcm_handle) {
                                if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
                                        NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
                                        return -EEXIST;
                                }
                                if (TC_H_MIN(tcm->tcm_handle)) {
                                        NL_SET_ERR_MSG(extack, "Invalid minor handle");
                                        return -EINVAL;
                                }
                                q = qdisc_lookup(dev, tcm->tcm_handle);
                                if (!q)
                                        goto create_n_graft;
                                if (n->nlmsg_flags & NLM_F_EXCL) {
                                        NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
                                        return -EEXIST;
                                }
                                if (tca[TCA_KIND] &&
                                    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
                                        NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                                        return -EINVAL;
                                }
                                if (q == p ||
                                    (p && check_loop(q, p, 0))) {
                                        NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
                                        return -ELOOP;
                                }
                                qdisc_refcount_inc(q);
                                goto graft;
                        } else {
                                if (!q)
                                        goto create_n_graft;

                                /* This magic test requires explanation.
                                 *
                                 * We know that some child q is already
                                 * attached to this parent and have a choice:
                                 * either to change it or to create/graft a
                                 * new one.
                                 *
                                 * 1. We are allowed to create/graft only
                                 * if CREATE and REPLACE flags are set.
                                 *
                                 * 2. If EXCL is set, the requestor wanted to
                                 * say that the qdisc tcm_handle is not
                                 * expected to exist, so we choose
                                 * create/graft too.
                                 *
                                 * 3. The last case is when no flags are set.
                                 * Alas, it is a sort of hole in the API; we
                                 * cannot decide what to do unambiguously.
                                 * For now we select create/graft if the
                                 * user gave a KIND which does not match the
                                 * existing one.
                                 */
                                if ((n->nlmsg_flags & NLM_F_CREATE) &&
                                    (n->nlmsg_flags & NLM_F_REPLACE) &&
                                    ((n->nlmsg_flags & NLM_F_EXCL) ||
                                     (tca[TCA_KIND] &&
                                      nla_strcmp(tca[TCA_KIND], q->ops->id))))
                                        goto create_n_graft;
                        }
                }
        } else {
                if (!tcm->tcm_handle) {
                        NL_SET_ERR_MSG(extack, "Handle cannot be zero");
                        return -EINVAL;
                }
                q = qdisc_lookup(dev, tcm->tcm_handle);
        }

        /* Change qdisc parameters */
        if (!q) {
                NL_SET_ERR_MSG(extack, "Specified qdisc not found");
                return -ENOENT;
        }
        if (n->nlmsg_flags & NLM_F_EXCL) {
                NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
                return -EEXIST;
        }
        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
                NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
        }
        err = qdisc_change(q, tca, extack);
        if (err == 0)
                qdisc_notify(net, skb, n, clid, NULL, q);
        return err;

create_n_graft:
        if (!(n->nlmsg_flags & NLM_F_CREATE)) {
                NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
                return -ENOENT;
        }
        if (clid == TC_H_INGRESS) {
                if (dev_ingress_queue(dev)) {
                        q = qdisc_create(dev, dev_ingress_queue(dev), p,
                                         tcm->tcm_parent, tcm->tcm_parent,
                                         tca, &err, extack);
                } else {
                        NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
                        err = -ENOENT;
                }
        } else {
                struct netdev_queue *dev_queue;

                if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
                        dev_queue = p->ops->cl_ops->select_queue(p, tcm);
                else if (p)
                        dev_queue = p->dev_queue;
                else
                        dev_queue = netdev_get_tx_queue(dev, 0);

                q = qdisc_create(dev, dev_queue, p,
                                 tcm->tcm_parent, tcm->tcm_handle,
                                 tca, &err, extack);
        }
        if (q == NULL) {
                if (err == -EAGAIN)
                        goto replay;
                return err;
        }

graft:
        err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
        if (err) {
                if (q)
                        qdisc_put(q);
                return err;
        }

        return 0;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                              struct netlink_callback *cb,
                              int *q_idx_p, int s_q_idx, bool recur,
                              bool dump_invisible)
{
        int ret = 0, q_idx = *q_idx_p;
        struct Qdisc *q;
        int b;

        if (!root)
                return 0;

        q = root;
        if (q_idx < s_q_idx) {
                q_idx++;
        } else {
                if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
        }

        /* If dumping singletons, there is no qdisc_dev(root) and the singleton
         * itself has already been dumped.
         *
         * If we've already dumped the top-level (ingress) qdisc above and the
         * global qdisc hashtable, we don't want to hit it again.
         */
        if (!qdisc_dev(root) || !recur)
                goto out;

        hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
                if (q_idx < s_q_idx) {
                        q_idx++;
                        continue;
                }
                if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
        }

out:
        *q_idx_p = q_idx;
        return ret;
done:
        ret = -1;
        goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        int idx, q_idx;
        int s_idx, s_q_idx;
        struct net_device *dev;
        const struct nlmsghdr *nlh = cb->nlh;
        struct nlattr *tca[TCA_MAX + 1];
        int err;

        s_idx = cb->args[0];
        s_q_idx = q_idx = cb->args[1];

        idx = 0;
        ASSERT_RTNL();

        err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
                                     rtm_tca_policy, cb->extack);
        if (err < 0)
                return err;

        for_each_netdev(net, dev) {
                struct netdev_queue *dev_queue;

                if (idx < s_idx)
                        goto cont;
                if (idx > s_idx)
                        s_q_idx = 0;
                q_idx = 0;

                if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
                                       true, tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;

                dev_queue = dev_ingress_queue(dev);
                if (dev_queue &&
                    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
                                       &q_idx, s_q_idx, false,
                                       tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;

cont:
                idx++;
        }

done:
        cb->args[0] = idx;
        cb->args[1] = q_idx;

        return skb->len;
}


/************************************************
 *      Traffic classes manipulation.           *
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
                          u32 portid, u32 seq, u16 flags, int event)
{
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

        cond_resched();
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
                                         NULL, &d, TCA_PAD) < 0)
                goto nla_put_failure;

        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
                         struct nlmsghdr *n, struct Qdisc *q,
                         unsigned long cl, int event)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
        int err = 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
}

static int tclass_del_notify(struct net *net,
                             const struct Qdisc_class_ops *cops,
                             struct sk_buff *oskb, struct nlmsghdr *n,
                             struct Qdisc *q, unsigned long cl)
{
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
        struct sk_buff *skb;
        int err = 0;

        if (!cops->delete)
                return -EOPNOTSUPP;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
                           RTM_DELTCLASS) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        err = cops->delete(q, cl);
        if (err) {
                kfree_skb(skb);
                return err;
        }

        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
}

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
        struct tcf_walker w;
        u32 classid;
        unsigned long cl;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
        struct tcf_bind_args *a = (void *)arg;

        if (tp->ops->bind_class) {
                struct Qdisc *q = tcf_block_q(tp->chain->block);

                sch_tree_lock(q);
                tp->ops->bind_class(n, a->classid, a->cl);
                sch_tree_unlock(q);
        }
        return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
                           unsigned long new_cl)
{
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct tcf_block *block;
        struct tcf_chain *chain;
        unsigned long cl;

        cl = cops->find(q, portid);
        if (!cl)
                return;
        if (!cops->tcf_block)
                return;
        block = cops->tcf_block(q, cl, NULL);
        if (!block)
                return;
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                struct tcf_proto *tp;

                for (tp = tcf_get_next_proto(chain, NULL, true);
                     tp; tp = tcf_get_next_proto(chain, tp, true)) {
                        struct tcf_bind_args arg = {};

                        arg.w.fn = tcf_node_bind;
                        arg.classid = clid;
                        arg.cl = new_cl;
                        tp->ops->walk(tp, &arg.w, true);
                }
        }
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
                           unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
                         struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q = NULL;
        const struct Qdisc_class_ops *cops;
        unsigned long cl = 0;
        unsigned long new_cl;
        u32 portid;
        u32 clid;
        u32 qid;
        int err;

        if ((n->nlmsg_type != RTM_GETTCLASS) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
                                     rtm_tca_policy, extack);
        if (err < 0)
                return err;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return -ENODEV;

        /*
           parent == TC_H_UNSPEC - unspecified parent.
           parent == TC_H_ROOT   - class is root, which has no parent.
           parent == X:0         - parent is root class.
           parent == X:Y         - parent is a node in hierarchy.
           parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

           handle == 0:0         - generate handle from kernel pool.
           handle == 0:Y         - class is X:Y, where X:0 is qdisc.
           handle == X:Y         - clear.
           handle == X:0         - root class.
         */
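
        /*
         * Worked example (added for illustration): "tc class add dev eth0
         * parent 1:1 classid 1:10 ..." arrives with tcm_parent = 0x10001
         * and tcm_handle = 0x1000a; both majors agree, so qid resolves to
         * 0x10000, i.e. the qdisc tc shows as "1:".
         */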

        /* Step 1. Determine qdisc handle X:0 */

        portid = tcm->tcm_parent;
        clid = tcm->tcm_handle;
        qid = TC_H_MAJ(clid);

        if (portid != TC_H_ROOT) {
                u32 qid1 = TC_H_MAJ(portid);

                if (qid && qid1) {
                        /* If both majors are known, they must be identical. */
                        if (qid != qid1)
                                return -EINVAL;
                } else if (qid1) {
                        qid = qid1;
                } else if (qid == 0)
                        qid = dev->qdisc->handle;

                /* Now qid is genuine qdisc handle consistent
                 * both with parent and child.
                 *
                 * TC_H_MAJ(portid) still may be unspecified, complete it now.
                 */
                if (portid)
                        portid = TC_H_MAKE(qid, portid);
        } else {
                if (qid == 0)
                        qid = dev->qdisc->handle;
        }

        /* OK. Locate qdisc */
        q = qdisc_lookup(dev, qid);
        if (!q)
                return -ENOENT;

        /* And check that it supports classes */
        cops = q->ops->cl_ops;
        if (cops == NULL)
                return -EINVAL;

        /* Now try to get class */
        if (clid == 0) {
                if (portid == TC_H_ROOT)
                        clid = qid;
        } else
                clid = TC_H_MAKE(qid, clid);

        if (clid)
                cl = cops->find(q, clid);

        if (cl == 0) {
                err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTCLASS ||
                    !(n->nlmsg_flags & NLM_F_CREATE))
                        goto out;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTCLASS:
                        err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL)
                                goto out;
                        break;
                case RTM_DELTCLASS:
                        err = tclass_del_notify(net, cops, skb, n, q, cl);
                        /* Unbind the class from filters with 0 */
                        tc_bind_tclass(q, portid, clid, 0);
                        goto out;
                case RTM_GETTCLASS:
                        err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
                        goto out;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
                NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
                return -EOPNOTSUPP;
        }

        new_cl = cl;
        err = -EOPNOTSUPP;
        if (cops->change)
                err = cops->change(q, clid, portid, tca, &new_cl, extack);
        if (err == 0) {
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
                /* We just created a new class; we need to do reverse binding. */
                if (cl != new_cl)
                        tc_bind_tclass(q, portid, clid, new_cl);
        }
out:
        return err;
}

struct qdisc_dump_args {
        struct qdisc_walker w;
        struct sk_buff *skb;
        struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
                            struct qdisc_walker *arg)
{
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

        return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                              RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
                                struct tcmsg *tcm, struct netlink_callback *cb,
                                int *t_p, int s_t)
{
        struct qdisc_dump_args arg;

        if (tc_qdisc_dump_ignore(q, false) ||
            *t_p < s_t || !q->ops->cl_ops ||
            (tcm->tcm_parent &&
             TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
                (*t_p)++;
                return 0;
        }
        if (*t_p > s_t)
                memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
        arg.w.fn = qdisc_class_dump;
        arg.skb = skb;
        arg.cb = cb;
        arg.w.stop = 0;
        arg.w.skip = cb->args[1];
        arg.w.count = 0;
        q->ops->cl_ops->walk(q, &arg.w);
        cb->args[1] = arg.w.count;
        if (arg.w.stop)
                return -1;
        (*t_p)++;
        return 0;
}
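
/* Dump-resume bookkeeping, sketched: across multi-part dumps cb->args[0]
 * holds the number of qdiscs already fully visited (passed back in as
 * s_t), cb->args[1] holds how many classes were already emitted from the
 * qdisc where the previous buffer filled up, and the deeper cb->args
 * slots are scratch for the class walker, wiped above once a fresh qdisc
 * is entered.
 */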

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
                               struct tcmsg *tcm, struct netlink_callback *cb,
                               int *t_p, int s_t)
{
        struct Qdisc *q;
        int b;

        if (!root)
                return 0;

        if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
                return -1;

        if (!qdisc_dev(root))
                return 0;

        if (tcm->tcm_parent) {
                q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
                if (q && q != root &&
                    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
                        return -1;
                return 0;
        }
        hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
                if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
                        return -1;
        }

        return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct tcmsg *tcm = nlmsg_data(cb->nlh);
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        struct net_device *dev;
        int t, s_t;

        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                return 0;
        dev = dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return 0;

        s_t = cb->args[0];
        t = 0;

        if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
                goto done;

        dev_queue = dev_ingress_queue(dev);
        if (dev_queue &&
            tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
                                &t, s_t) < 0)
                goto done;

done:
        cb->args[0] = t;

        dev_put(dev);
        return skb->len;
}
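
/* For reference, "tc class show dev eth0" (an illustrative command) sends
 * an RTM_GETTCLASS dump request that lands here: the root hierarchy is
 * walked first, then the ingress qdisc, emitting one RTM_NEWTCLASS
 * message per class.
 */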

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
        seq_printf(seq, "%08x %08x %08x %08x\n",
                   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
                   1000000,
                   (u32)NSEC_PER_SEC / hrtimer_resolution);

        return 0;
}
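
/* Example: on a system with high-resolution timers this typically reads
 *   $ cat /proc/net/psched
 *   000003e8 00000040 000f4240 3b9aca00
 * i.e. 1000 ns per pretended "us", 64 ns per psched tick, the legacy
 * 1 MHz clock rate, and a 1 GHz hrtimer clock (values vary with kernel
 * configuration; shown for illustration only).
 */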

static int __net_init psched_net_init(struct net *net)
{
        struct proc_dir_entry *e;

        e = proc_create_single("psched", 0, net->proc_net, psched_show);
        if (e == NULL)
                return -ENOMEM;

        return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
        remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
        return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
        .init = psched_net_init,
        .exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
        int err;

        err = register_pernet_subsys(&psched_net_ops);
        if (err) {
                pr_err("pktsched_init: cannot initialize per netns operations\n");
                return err;
        }

        register_qdisc(&pfifo_fast_ops);
        register_qdisc(&pfifo_qdisc_ops);
        register_qdisc(&bfifo_qdisc_ops);
        register_qdisc(&pfifo_head_drop_qdisc_ops);
        register_qdisc(&mq_qdisc_ops);
        register_qdisc(&noqueue_qdisc_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, 0);
        rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, 0);

        return 0;
}

subsys_initcall(pktsched_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_api.c Packet scheduler API.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 *
7 * Fixes:
8 *
9 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
10 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
11 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/skbuff.h>
20#include <linux/init.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/kmod.h>
24#include <linux/list.h>
25#include <linux/hrtimer.h>
26#include <linux/slab.h>
27#include <linux/hashtable.h>
28
29#include <net/net_namespace.h>
30#include <net/sock.h>
31#include <net/netlink.h>
32#include <net/pkt_sched.h>
33#include <net/pkt_cls.h>
34#include <net/tc_wrapper.h>
35
36#include <trace/events/qdisc.h>
37
38/*
39
40 Short review.
41 -------------
42
43 This file consists of two interrelated parts:
44
45 1. queueing disciplines manager frontend.
46 2. traffic classes manager frontend.
47
48 Generally, queueing discipline ("qdisc") is a black box,
49 which is able to enqueue packets and to dequeue them (when
50 device is ready to send something) in order and at times
51 determined by algorithm hidden in it.
52
53 qdisc's are divided to two categories:
54 - "queues", which have no internal structure visible from outside.
55 - "schedulers", which split all the packets to "traffic classes",
56 using "packet classifiers" (look at cls_api.c)
57
58 In turn, classes may have child qdiscs (as rule, queues)
59 attached to them etc. etc. etc.
60
61 The goal of the routines in this file is to translate
62 information supplied by user in the form of handles
63 to more intelligible for kernel form, to make some sanity
64 checks and part of work, which is common to all qdiscs
65 and to provide rtnetlink notifications.
66
67 All real intelligent work is done inside qdisc modules.
68
69
70
71 Every discipline has two major routines: enqueue and dequeue.
72
73 ---dequeue
74
75 dequeue usually returns a skb to send. It is allowed to return NULL,
76 but it does not mean that queue is empty, it just means that
77 discipline does not want to send anything this time.
78 Queue is really empty if q->q.qlen == 0.
79 For complicated disciplines with multiple queues q->q is not
80 real packet queue, but however q->q.qlen must be valid.
81
82 ---enqueue
83
84 enqueue returns 0, if packet was enqueued successfully.
85 If packet (this one or another one) was dropped, it returns
86 not zero error code.
87 NET_XMIT_DROP - this packet dropped
88 Expected action: do not backoff, but wait until queue will clear.
89 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
90 Expected action: backoff or ignore
91
92 Auxiliary routines:
93
94 ---peek
95
96 like dequeue but without removing a packet from the queue
97
98 ---reset
99
100 returns qdisc to initial state: purge all buffers, clear all
101 timers, counters (except for statistics) etc.
102
103 ---init
104
105 initializes newly created qdisc.
106
107 ---destroy
108
109 destroys resources allocated by init and during lifetime of qdisc.
110
111 ---change
112
113 changes qdisc parameters.
114 */
115
116/* Protects list of registered TC modules. It is pure SMP lock. */
117static DEFINE_RWLOCK(qdisc_mod_lock);
118
119
120/************************************************
121 * Queueing disciplines manipulation. *
122 ************************************************/
123
124
125/* The list of all installed queueing disciplines. */
126
127static struct Qdisc_ops *qdisc_base;
128
129/* Register/unregister queueing discipline */
130
131int register_qdisc(struct Qdisc_ops *qops)
132{
133 struct Qdisc_ops *q, **qp;
134 int rc = -EEXIST;
135
136 write_lock(&qdisc_mod_lock);
137 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 if (!strcmp(qops->id, q->id))
139 goto out;
140
141 if (qops->enqueue == NULL)
142 qops->enqueue = noop_qdisc_ops.enqueue;
143 if (qops->peek == NULL) {
144 if (qops->dequeue == NULL)
145 qops->peek = noop_qdisc_ops.peek;
146 else
147 goto out_einval;
148 }
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
151
152 if (qops->cl_ops) {
153 const struct Qdisc_class_ops *cops = qops->cl_ops;
154
155 if (!(cops->find && cops->walk && cops->leaf))
156 goto out_einval;
157
158 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
159 goto out_einval;
160 }
161
162 qops->next = NULL;
163 *qp = qops;
164 rc = 0;
165out:
166 write_unlock(&qdisc_mod_lock);
167 return rc;
168
169out_einval:
170 rc = -EINVAL;
171 goto out;
172}
173EXPORT_SYMBOL(register_qdisc);
174
175void unregister_qdisc(struct Qdisc_ops *qops)
176{
177 struct Qdisc_ops *q, **qp;
178 int err = -ENOENT;
179
180 write_lock(&qdisc_mod_lock);
181 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
182 if (q == qops)
183 break;
184 if (q) {
185 *qp = q->next;
186 q->next = NULL;
187 err = 0;
188 }
189 write_unlock(&qdisc_mod_lock);
190
191 WARN(err, "unregister qdisc(%s) failed\n", qops->id);
192}
193EXPORT_SYMBOL(unregister_qdisc);
194
195/* Get default qdisc if not otherwise specified */
196void qdisc_get_default(char *name, size_t len)
197{
198 read_lock(&qdisc_mod_lock);
199 strscpy(name, default_qdisc_ops->id, len);
200 read_unlock(&qdisc_mod_lock);
201}
202
203static struct Qdisc_ops *qdisc_lookup_default(const char *name)
204{
205 struct Qdisc_ops *q = NULL;
206
207 for (q = qdisc_base; q; q = q->next) {
208 if (!strcmp(name, q->id)) {
209 if (!try_module_get(q->owner))
210 q = NULL;
211 break;
212 }
213 }
214
215 return q;
216}
217
218/* Set new default qdisc to use */
219int qdisc_set_default(const char *name)
220{
221 const struct Qdisc_ops *ops;
222
223 if (!capable(CAP_NET_ADMIN))
224 return -EPERM;
225
226 write_lock(&qdisc_mod_lock);
227 ops = qdisc_lookup_default(name);
228 if (!ops) {
229 /* Not found, drop lock and try to load module */
230 write_unlock(&qdisc_mod_lock);
231 request_module(NET_SCH_ALIAS_PREFIX "%s", name);
232 write_lock(&qdisc_mod_lock);
233
234 ops = qdisc_lookup_default(name);
235 }
236
237 if (ops) {
238 /* Set new default */
239 module_put(default_qdisc_ops->owner);
240 default_qdisc_ops = ops;
241 }
242 write_unlock(&qdisc_mod_lock);
243
244 return ops ? 0 : -ENOENT;
245}
246
247#ifdef CONFIG_NET_SCH_DEFAULT
248/* Set default value from kernel config */
249static int __init sch_default_qdisc(void)
250{
251 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
252}
253late_initcall(sch_default_qdisc);
254#endif
255
256/* We know handle. Find qdisc among all qdisc's attached to device
257 * (root qdisc, all its children, children of children etc.)
258 * Note: caller either uses rtnl or rcu_read_lock()
259 */
260
261static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
262{
263 struct Qdisc *q;
264
265 if (!qdisc_dev(root))
266 return (root->handle == handle ? root : NULL);
267
268 if (!(root->flags & TCQ_F_BUILTIN) &&
269 root->handle == handle)
270 return root;
271
272 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 lockdep_rtnl_is_held()) {
274 if (q->handle == handle)
275 return q;
276 }
277 return NULL;
278}
279
280void qdisc_hash_add(struct Qdisc *q, bool invisible)
281{
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 ASSERT_RTNL();
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 if (invisible)
286 q->flags |= TCQ_F_INVISIBLE;
287 }
288}
289EXPORT_SYMBOL(qdisc_hash_add);
290
291void qdisc_hash_del(struct Qdisc *q)
292{
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 ASSERT_RTNL();
295 hash_del_rcu(&q->hash);
296 }
297}
298EXPORT_SYMBOL(qdisc_hash_del);
299
300struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301{
302 struct Qdisc *q;
303
304 if (!handle)
305 return NULL;
306 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
307 if (q)
308 goto out;
309
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
313 handle);
314out:
315 return q;
316}
317
318struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
319{
320 struct netdev_queue *nq;
321 struct Qdisc *q;
322
323 if (!handle)
324 return NULL;
325 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
326 if (q)
327 goto out;
328
329 nq = dev_ingress_queue_rcu(dev);
330 if (nq)
331 q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
332 handle);
333out:
334 return q;
335}
336
337static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
338{
339 unsigned long cl;
340 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
341
342 if (cops == NULL)
343 return NULL;
344 cl = cops->find(p, classid);
345
346 if (cl == 0)
347 return NULL;
348 return cops->leaf(p, cl);
349}
350
351/* Find queueing discipline by name */
352
353static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
354{
355 struct Qdisc_ops *q = NULL;
356
357 if (kind) {
358 read_lock(&qdisc_mod_lock);
359 for (q = qdisc_base; q; q = q->next) {
360 if (nla_strcmp(kind, q->id) == 0) {
361 if (!try_module_get(q->owner))
362 q = NULL;
363 break;
364 }
365 }
366 read_unlock(&qdisc_mod_lock);
367 }
368 return q;
369}
370
371/* The linklayer setting were not transferred from iproute2, in older
372 * versions, and the rate tables lookup systems have been dropped in
373 * the kernel. To keep backward compatible with older iproute2 tc
374 * utils, we detect the linklayer setting by detecting if the rate
375 * table were modified.
376 *
377 * For linklayer ATM table entries, the rate table will be aligned to
378 * 48 bytes, thus some table entries will contain the same value. The
379 * mpu (min packet unit) is also encoded into the old rate table, thus
380 * starting from the mpu, we find low and high table entries for
381 * mapping this cell. If these entries contain the same value, when
382 * the rate tables have been modified for linklayer ATM.
383 *
384 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
385 * and then roundup to the next cell, calc the table entry one below,
386 * and compare.
387 */
388static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
389{
390 int low = roundup(r->mpu, 48);
391 int high = roundup(low+1, 48);
392 int cell_low = low >> r->cell_log;
393 int cell_high = (high >> r->cell_log) - 1;
394
395 /* rtab is too inaccurate at rates > 100Mbit/s */
396 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
397 pr_debug("TC linklayer: Giving up ATM detection\n");
398 return TC_LINKLAYER_ETHERNET;
399 }
400
401 if ((cell_high > cell_low) && (cell_high < 256)
402 && (rtab[cell_low] == rtab[cell_high])) {
403 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
404 cell_low, cell_high, rtab[cell_high]);
405 return TC_LINKLAYER_ATM;
406 }
407 return TC_LINKLAYER_ETHERNET;
408}
409
410static struct qdisc_rate_table *qdisc_rtab_list;
411
412struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
413 struct nlattr *tab,
414 struct netlink_ext_ack *extack)
415{
416 struct qdisc_rate_table *rtab;
417
418 if (tab == NULL || r->rate == 0 ||
419 r->cell_log == 0 || r->cell_log >= 32 ||
420 nla_len(tab) != TC_RTAB_SIZE) {
421 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
422 return NULL;
423 }
424
425 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
426 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
427 !memcmp(&rtab->data, nla_data(tab), 1024)) {
428 rtab->refcnt++;
429 return rtab;
430 }
431 }
432
433 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
434 if (rtab) {
435 rtab->rate = *r;
436 rtab->refcnt = 1;
437 memcpy(rtab->data, nla_data(tab), 1024);
438 if (r->linklayer == TC_LINKLAYER_UNAWARE)
439 r->linklayer = __detect_linklayer(r, rtab->data);
440 rtab->next = qdisc_rtab_list;
441 qdisc_rtab_list = rtab;
442 } else {
443 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
444 }
445 return rtab;
446}
447EXPORT_SYMBOL(qdisc_get_rtab);
448
449void qdisc_put_rtab(struct qdisc_rate_table *tab)
450{
451 struct qdisc_rate_table *rtab, **rtabp;
452
453 if (!tab || --tab->refcnt)
454 return;
455
456 for (rtabp = &qdisc_rtab_list;
457 (rtab = *rtabp) != NULL;
458 rtabp = &rtab->next) {
459 if (rtab == tab) {
460 *rtabp = rtab->next;
461 kfree(rtab);
462 return;
463 }
464 }
465}
466EXPORT_SYMBOL(qdisc_put_rtab);
467
468static LIST_HEAD(qdisc_stab_list);
469
470static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
471 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
472 [TCA_STAB_DATA] = { .type = NLA_BINARY },
473};
474
475static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
476 struct netlink_ext_ack *extack)
477{
478 struct nlattr *tb[TCA_STAB_MAX + 1];
479 struct qdisc_size_table *stab;
480 struct tc_sizespec *s;
481 unsigned int tsize = 0;
482 u16 *tab = NULL;
483 int err;
484
485 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
486 extack);
487 if (err < 0)
488 return ERR_PTR(err);
489 if (!tb[TCA_STAB_BASE]) {
490 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
491 return ERR_PTR(-EINVAL);
492 }
493
494 s = nla_data(tb[TCA_STAB_BASE]);
495
496 if (s->tsize > 0) {
497 if (!tb[TCA_STAB_DATA]) {
498 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
499 return ERR_PTR(-EINVAL);
500 }
501 tab = nla_data(tb[TCA_STAB_DATA]);
502 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
503 }
504
505 if (tsize != s->tsize || (!tab && tsize > 0)) {
506 NL_SET_ERR_MSG(extack, "Invalid size of size table");
507 return ERR_PTR(-EINVAL);
508 }
509
510 list_for_each_entry(stab, &qdisc_stab_list, list) {
511 if (memcmp(&stab->szopts, s, sizeof(*s)))
512 continue;
513 if (tsize > 0 &&
514 memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
515 continue;
516 stab->refcnt++;
517 return stab;
518 }
519
520 if (s->size_log > STAB_SIZE_LOG_MAX ||
521 s->cell_log > STAB_SIZE_LOG_MAX) {
522 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
523 return ERR_PTR(-EINVAL);
524 }
525
526 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
527 if (!stab)
528 return ERR_PTR(-ENOMEM);
529
530 stab->refcnt = 1;
531 stab->szopts = *s;
532 if (tsize > 0)
533 memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
534
535 list_add_tail(&stab->list, &qdisc_stab_list);
536
537 return stab;
538}
539
540void qdisc_put_stab(struct qdisc_size_table *tab)
541{
542 if (!tab)
543 return;
544
545 if (--tab->refcnt == 0) {
546 list_del(&tab->list);
547 kfree_rcu(tab, rcu);
548 }
549}
550EXPORT_SYMBOL(qdisc_put_stab);
551
552static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
553{
554 struct nlattr *nest;
555
556 nest = nla_nest_start_noflag(skb, TCA_STAB);
557 if (nest == NULL)
558 goto nla_put_failure;
559 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
560 goto nla_put_failure;
561 nla_nest_end(skb, nest);
562
563 return skb->len;
564
565nla_put_failure:
566 return -1;
567}
568
569void __qdisc_calculate_pkt_len(struct sk_buff *skb,
570 const struct qdisc_size_table *stab)
571{
572 int pkt_len, slot;
573
574 pkt_len = skb->len + stab->szopts.overhead;
575 if (unlikely(!stab->szopts.tsize))
576 goto out;
577
578 slot = pkt_len + stab->szopts.cell_align;
579 if (unlikely(slot < 0))
580 slot = 0;
581
582 slot >>= stab->szopts.cell_log;
583 if (likely(slot < stab->szopts.tsize))
584 pkt_len = stab->data[slot];
585 else
586 pkt_len = stab->data[stab->szopts.tsize - 1] *
587 (slot / stab->szopts.tsize) +
588 stab->data[slot % stab->szopts.tsize];
589
590 pkt_len <<= stab->szopts.size_log;
591out:
592 if (unlikely(pkt_len < 1))
593 pkt_len = 1;
594 qdisc_skb_cb(skb)->pkt_len = pkt_len;
595}
596
597void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
598{
599 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
600 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
601 txt, qdisc->ops->id, qdisc->handle >> 16);
602 qdisc->flags |= TCQ_F_WARN_NONWC;
603 }
604}
605EXPORT_SYMBOL(qdisc_warn_nonwc);
606
607static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
608{
609 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
610 timer);
611
612 rcu_read_lock();
613 __netif_schedule(qdisc_root(wd->qdisc));
614 rcu_read_unlock();
615
616 return HRTIMER_NORESTART;
617}
618
619void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
620 clockid_t clockid)
621{
622 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
623 wd->timer.function = qdisc_watchdog;
624 wd->qdisc = qdisc;
625}
626EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
627
628void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
629{
630 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
631}
632EXPORT_SYMBOL(qdisc_watchdog_init);
633
634void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
635 u64 delta_ns)
636{
637 bool deactivated;
638
639 rcu_read_lock();
640 deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
641 &qdisc_root_sleeping(wd->qdisc)->state);
642 rcu_read_unlock();
643 if (deactivated)
644 return;
645
646 if (hrtimer_is_queued(&wd->timer)) {
647 u64 softexpires;
648
649 softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
650 /* If timer is already set in [expires, expires + delta_ns],
651 * do not reprogram it.
652 */
653 if (softexpires - expires <= delta_ns)
654 return;
655 }
656
657 hrtimer_start_range_ns(&wd->timer,
658 ns_to_ktime(expires),
659 delta_ns,
660 HRTIMER_MODE_ABS_PINNED);
661}
662EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
663
664void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
665{
666 hrtimer_cancel(&wd->timer);
667}
668EXPORT_SYMBOL(qdisc_watchdog_cancel);
669
670static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
671{
672 struct hlist_head *h;
673 unsigned int i;
674
675 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
676
677 if (h != NULL) {
678 for (i = 0; i < n; i++)
679 INIT_HLIST_HEAD(&h[i]);
680 }
681 return h;
682}
683
684void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
685{
686 struct Qdisc_class_common *cl;
687 struct hlist_node *next;
688 struct hlist_head *nhash, *ohash;
689 unsigned int nsize, nmask, osize;
690 unsigned int i, h;
691
692 /* Rehash when load factor exceeds 0.75 */
693 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
694 return;
695 nsize = clhash->hashsize * 2;
696 nmask = nsize - 1;
697 nhash = qdisc_class_hash_alloc(nsize);
698 if (nhash == NULL)
699 return;
700
701 ohash = clhash->hash;
702 osize = clhash->hashsize;
703
704 sch_tree_lock(sch);
705 for (i = 0; i < osize; i++) {
706 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
707 h = qdisc_class_hash(cl->classid, nmask);
708 hlist_add_head(&cl->hnode, &nhash[h]);
709 }
710 }
711 clhash->hash = nhash;
712 clhash->hashsize = nsize;
713 clhash->hashmask = nmask;
714 sch_tree_unlock(sch);
715
716 kvfree(ohash);
717}
718EXPORT_SYMBOL(qdisc_class_hash_grow);
719
720int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
721{
722 unsigned int size = 4;
723
724 clhash->hash = qdisc_class_hash_alloc(size);
725 if (!clhash->hash)
726 return -ENOMEM;
727 clhash->hashsize = size;
728 clhash->hashmask = size - 1;
729 clhash->hashelems = 0;
730 return 0;
731}
732EXPORT_SYMBOL(qdisc_class_hash_init);
733
734void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
735{
736 kvfree(clhash->hash);
737}
738EXPORT_SYMBOL(qdisc_class_hash_destroy);
739
740void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
741 struct Qdisc_class_common *cl)
742{
743 unsigned int h;
744
745 INIT_HLIST_NODE(&cl->hnode);
746 h = qdisc_class_hash(cl->classid, clhash->hashmask);
747 hlist_add_head(&cl->hnode, &clhash->hash[h]);
748 clhash->hashelems++;
749}
750EXPORT_SYMBOL(qdisc_class_hash_insert);
751
752void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
753 struct Qdisc_class_common *cl)
754{
755 hlist_del(&cl->hnode);
756 clhash->hashelems--;
757}
758EXPORT_SYMBOL(qdisc_class_hash_remove);
759
760/* Allocate an unique handle from space managed by kernel
761 * Possible range is [8000-FFFF]:0000 (0x8000 values)
762 */
763static u32 qdisc_alloc_handle(struct net_device *dev)
764{
765 int i = 0x8000;
766 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
767
768 do {
769 autohandle += TC_H_MAKE(0x10000U, 0);
770 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
771 autohandle = TC_H_MAKE(0x80000000U, 0);
772 if (!qdisc_lookup(dev, autohandle))
773 return autohandle;
774 cond_resched();
775 } while (--i > 0);
776
777 return 0;
778}
779
780void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
781{
782 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
783 const struct Qdisc_class_ops *cops;
784 unsigned long cl;
785 u32 parentid;
786 bool notify;
787 int drops;
788
789 if (n == 0 && len == 0)
790 return;
791 drops = max_t(int, n, 0);
792 rcu_read_lock();
793 while ((parentid = sch->parent)) {
794 if (parentid == TC_H_ROOT)
795 break;
796
797 if (sch->flags & TCQ_F_NOPARENT)
798 break;
799 /* Notify parent qdisc only if child qdisc becomes empty.
800 *
801 * If child was empty even before update then backlog
802 * counter is screwed and we skip notification because
803 * parent class is already passive.
804 *
805 * If the original child was offloaded then it is allowed
806 * to be seem as empty, so the parent is notified anyway.
807 */
808 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
809 !qdisc_is_offloaded);
810 /* TODO: perform the search on a per txq basis */
811 sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
812 if (sch == NULL) {
813 WARN_ON_ONCE(parentid != TC_H_ROOT);
814 break;
815 }
816 cops = sch->ops->cl_ops;
817 if (notify && cops->qlen_notify) {
818 cl = cops->find(sch, parentid);
819 cops->qlen_notify(sch, cl);
820 }
821 sch->q.qlen -= n;
822 sch->qstats.backlog -= len;
823 __qdisc_qstats_drop(sch, drops);
824 }
825 rcu_read_unlock();
826}
827EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
828
829int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
830 void *type_data)
831{
832 struct net_device *dev = qdisc_dev(sch);
833 int err;
834
835 sch->flags &= ~TCQ_F_OFFLOADED;
836 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
837 return 0;
838
839 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
840 if (err == -EOPNOTSUPP)
841 return 0;
842
843 if (!err)
844 sch->flags |= TCQ_F_OFFLOADED;
845
846 return err;
847}
848EXPORT_SYMBOL(qdisc_offload_dump_helper);
849
850void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
851 struct Qdisc *new, struct Qdisc *old,
852 enum tc_setup_type type, void *type_data,
853 struct netlink_ext_ack *extack)
854{
855 bool any_qdisc_is_offloaded;
856 int err;
857
858 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
859 return;
860
861 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
862
863 /* Don't report error if the graft is part of destroy operation. */
864 if (!err || !new || new == &noop_qdisc)
865 return;
866
867 /* Don't report error if the parent, the old child and the new
868 * one are not offloaded.
869 */
870 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
871 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
872 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
873
874 if (any_qdisc_is_offloaded)
875 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
876}
877EXPORT_SYMBOL(qdisc_offload_graft_helper);
878
879void qdisc_offload_query_caps(struct net_device *dev,
880 enum tc_setup_type type,
881 void *caps, size_t caps_len)
882{
883 const struct net_device_ops *ops = dev->netdev_ops;
884 struct tc_query_caps_base base = {
885 .type = type,
886 .caps = caps,
887 };
888
889 memset(caps, 0, caps_len);
890
891 if (ops->ndo_setup_tc)
892 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
893}
894EXPORT_SYMBOL(qdisc_offload_query_caps);
895
896static void qdisc_offload_graft_root(struct net_device *dev,
897 struct Qdisc *new, struct Qdisc *old,
898 struct netlink_ext_ack *extack)
899{
900 struct tc_root_qopt_offload graft_offload = {
901 .command = TC_ROOT_GRAFT,
902 .handle = new ? new->handle : 0,
903 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
904 (old && old->flags & TCQ_F_INGRESS),
905 };
906
907 qdisc_offload_graft_helper(dev, NULL, new, old,
908 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
909}
910
911static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
912 u32 portid, u32 seq, u16 flags, int event,
913 struct netlink_ext_ack *extack)
914{
915 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
916 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
917 struct tcmsg *tcm;
918 struct nlmsghdr *nlh;
919 unsigned char *b = skb_tail_pointer(skb);
920 struct gnet_dump d;
921 struct qdisc_size_table *stab;
922 u32 block_index;
923 __u32 qlen;
924
925 cond_resched();
926 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
927 if (!nlh)
928 goto out_nlmsg_trim;
929 tcm = nlmsg_data(nlh);
930 tcm->tcm_family = AF_UNSPEC;
931 tcm->tcm__pad1 = 0;
932 tcm->tcm__pad2 = 0;
933 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
934 tcm->tcm_parent = clid;
935 tcm->tcm_handle = q->handle;
936 tcm->tcm_info = refcount_read(&q->refcnt);
937 if (nla_put_string(skb, TCA_KIND, q->ops->id))
938 goto nla_put_failure;
939 if (q->ops->ingress_block_get) {
940 block_index = q->ops->ingress_block_get(q);
941 if (block_index &&
942 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
943 goto nla_put_failure;
944 }
945 if (q->ops->egress_block_get) {
946 block_index = q->ops->egress_block_get(q);
947 if (block_index &&
948 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
949 goto nla_put_failure;
950 }
951 if (q->ops->dump && q->ops->dump(q, skb) < 0)
952 goto nla_put_failure;
953 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
954 goto nla_put_failure;
955 qlen = qdisc_qlen_sum(q);
956
957 stab = rtnl_dereference(q->stab);
958 if (stab && qdisc_dump_stab(skb, stab) < 0)
959 goto nla_put_failure;
960
961 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
962 NULL, &d, TCA_PAD) < 0)
963 goto nla_put_failure;
964
965 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
966 goto nla_put_failure;
967
968 if (qdisc_is_percpu_stats(q)) {
969 cpu_bstats = q->cpu_bstats;
970 cpu_qstats = q->cpu_qstats;
971 }
972
973 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
974 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
975 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
976 goto nla_put_failure;
977
978 if (gnet_stats_finish_copy(&d) < 0)
979 goto nla_put_failure;
980
981 if (extack && extack->_msg &&
982 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
983 goto out_nlmsg_trim;
984
985 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
986
987 return skb->len;
988
989out_nlmsg_trim:
990nla_put_failure:
991 nlmsg_trim(skb, b);
992 return -1;
993}
994
995static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
996{
997 if (q->flags & TCQ_F_BUILTIN)
998 return true;
999 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
1000 return true;
1001
1002 return false;
1003}
1004
1005static int qdisc_get_notify(struct net *net, struct sk_buff *oskb,
1006 struct nlmsghdr *n, u32 clid, struct Qdisc *q,
1007 struct netlink_ext_ack *extack)
1008{
1009 struct sk_buff *skb;
1010 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1011
1012 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1013 if (!skb)
1014 return -ENOBUFS;
1015
1016 if (!tc_qdisc_dump_ignore(q, false)) {
1017 if (tc_fill_qdisc(skb, q, clid, portid, n->nlmsg_seq, 0,
1018 RTM_NEWQDISC, extack) < 0)
1019 goto err_out;
1020 }
1021
1022 if (skb->len)
1023 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1024 n->nlmsg_flags & NLM_F_ECHO);
1025
1026err_out:
1027 kfree_skb(skb);
1028 return -EINVAL;
1029}
1030
1031static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1032 struct nlmsghdr *n, u32 clid,
1033 struct Qdisc *old, struct Qdisc *new,
1034 struct netlink_ext_ack *extack)
1035{
1036 struct sk_buff *skb;
1037 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1038
1039 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
1040 return 0;
1041
1042 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1043 if (!skb)
1044 return -ENOBUFS;
1045
1046 if (old && !tc_qdisc_dump_ignore(old, false)) {
1047 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1048 0, RTM_DELQDISC, extack) < 0)
1049 goto err_out;
1050 }
1051 if (new && !tc_qdisc_dump_ignore(new, false)) {
1052 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1053 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
1054 goto err_out;
1055 }
1056
1057 if (skb->len)
1058 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1059 n->nlmsg_flags & NLM_F_ECHO);
1060
1061err_out:
1062 kfree_skb(skb);
1063 return -EINVAL;
1064}
1065
1066static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1067 struct nlmsghdr *n, u32 clid,
1068 struct Qdisc *old, struct Qdisc *new,
1069 struct netlink_ext_ack *extack)
1070{
1071 if (new || old)
1072 qdisc_notify(net, skb, n, clid, old, new, extack);
1073
1074 if (old)
1075 qdisc_put(old);
1076}
1077
1078static void qdisc_clear_nolock(struct Qdisc *sch)
1079{
1080 sch->flags &= ~TCQ_F_NOLOCK;
1081 if (!(sch->flags & TCQ_F_CPUSTATS))
1082 return;
1083
1084 free_percpu(sch->cpu_bstats);
1085 free_percpu(sch->cpu_qstats);
1086 sch->cpu_bstats = NULL;
1087 sch->cpu_qstats = NULL;
1088 sch->flags &= ~TCQ_F_CPUSTATS;
1089}
1090
1091/* Graft qdisc "new" to class "classid" of qdisc "parent" or
1092 * to device "dev".
1093 *
1094 * When appropriate send a netlink notification using 'skb'
1095 * and "n".
1096 *
1097 * On success, destroy old qdisc.
1098 */
1099
1100static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1101 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1102 struct Qdisc *new, struct Qdisc *old,
1103 struct netlink_ext_ack *extack)
1104{
1105 struct Qdisc *q = old;
1106 struct net *net = dev_net(dev);
1107
1108 if (parent == NULL) {
1109 unsigned int i, num_q, ingress;
1110 struct netdev_queue *dev_queue;
1111
1112 ingress = 0;
1113 num_q = dev->num_tx_queues;
1114 if ((q && q->flags & TCQ_F_INGRESS) ||
1115 (new && new->flags & TCQ_F_INGRESS)) {
1116 ingress = 1;
1117 dev_queue = dev_ingress_queue(dev);
1118 if (!dev_queue) {
1119 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1120 return -ENOENT;
1121 }
1122
1123 q = rtnl_dereference(dev_queue->qdisc_sleeping);
1124
1125 /* This is the counterpart of that qdisc_refcount_inc_nz() call in
1126 * __tcf_qdisc_find() for filter requests.
1127 */
1128 if (!qdisc_refcount_dec_if_one(q)) {
1129 NL_SET_ERR_MSG(extack,
1130 "Current ingress or clsact Qdisc has ongoing filter requests");
1131 return -EBUSY;
1132 }
1133 }
1134
1135 if (dev->flags & IFF_UP)
1136 dev_deactivate(dev);
1137
1138 qdisc_offload_graft_root(dev, new, old, extack);
1139
1140 if (new && new->ops->attach && !ingress)
1141 goto skip;
1142
1143 if (!ingress) {
1144 for (i = 0; i < num_q; i++) {
1145 dev_queue = netdev_get_tx_queue(dev, i);
1146 old = dev_graft_qdisc(dev_queue, new);
1147
1148 if (new && i > 0)
1149 qdisc_refcount_inc(new);
1150 qdisc_put(old);
1151 }
1152 } else {
1153 old = dev_graft_qdisc(dev_queue, NULL);
1154
1155 /* {ingress,clsact}_destroy() @old before grafting @new to avoid
1156 * unprotected concurrent accesses to net_device::miniq_{in,e}gress
1157 * pointer(s) in mini_qdisc_pair_swap().
1158 */
1159 qdisc_notify(net, skb, n, classid, old, new, extack);
1160 qdisc_destroy(old);
1161
1162 dev_graft_qdisc(dev_queue, new);
1163 }
1164
1165skip:
1166 if (!ingress) {
1167 old = rtnl_dereference(dev->qdisc);
1168 if (new && !new->ops->attach)
1169 qdisc_refcount_inc(new);
1170 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1171
1172 notify_and_destroy(net, skb, n, classid, old, new, extack);
1173
1174 if (new && new->ops->attach)
1175 new->ops->attach(new);
1176 }
1177
1178 if (dev->flags & IFF_UP)
1179 dev_activate(dev);
1180 } else {
1181 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1182 unsigned long cl;
1183 int err;
1184
1185 /* Only support running class lockless if parent is lockless */
1186 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1187 qdisc_clear_nolock(new);
1188
1189 if (!cops || !cops->graft)
1190 return -EOPNOTSUPP;
1191
1192 cl = cops->find(parent, classid);
1193 if (!cl) {
1194 NL_SET_ERR_MSG(extack, "Specified class not found");
1195 return -ENOENT;
1196 }
1197
1198 if (new && new->ops == &noqueue_qdisc_ops) {
1199 NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1200 return -EINVAL;
1201 }
1202
1203 if (new &&
1204 !(parent->flags & TCQ_F_MQROOT) &&
1205 rcu_access_pointer(new->stab)) {
1206 NL_SET_ERR_MSG(extack, "STAB not supported on a non root");
1207 return -EINVAL;
1208 }
1209 err = cops->graft(parent, cl, new, &old, extack);
1210 if (err)
1211 return err;
1212 notify_and_destroy(net, skb, n, classid, old, new, extack);
1213 }
1214 return 0;
1215}
1216
1217static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1218 struct netlink_ext_ack *extack)
1219{
1220 u32 block_index;
1221
1222 if (tca[TCA_INGRESS_BLOCK]) {
1223 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1224
1225 if (!block_index) {
1226 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1227 return -EINVAL;
1228 }
1229 if (!sch->ops->ingress_block_set) {
1230 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1231 return -EOPNOTSUPP;
1232 }
1233 sch->ops->ingress_block_set(sch, block_index);
1234 }
1235 if (tca[TCA_EGRESS_BLOCK]) {
1236 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1237
1238 if (!block_index) {
1239 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1240 return -EINVAL;
1241 }
1242 if (!sch->ops->egress_block_set) {
1243 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1244 return -EOPNOTSUPP;
1245 }
1246 sch->ops->egress_block_set(sch, block_index);
1247 }
1248 return 0;
1249}
1250
1251/*
1252 Allocate and initialize new qdisc.
1253
1254 Parameters are passed via opt.
1255 */
1256
1257static struct Qdisc *qdisc_create(struct net_device *dev,
1258 struct netdev_queue *dev_queue,
1259 u32 parent, u32 handle,
1260 struct nlattr **tca, int *errp,
1261 struct netlink_ext_ack *extack)
1262{
1263 int err;
1264 struct nlattr *kind = tca[TCA_KIND];
1265 struct Qdisc *sch;
1266 struct Qdisc_ops *ops;
1267 struct qdisc_size_table *stab;
1268
1269 ops = qdisc_lookup_ops(kind);
1270#ifdef CONFIG_MODULES
1271 if (ops == NULL && kind != NULL) {
1272 char name[IFNAMSIZ];
1273 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1274 /* We dropped the RTNL semaphore in order to
1275 * perform the module load. So, even if we
1276 * succeeded in loading the module we have to
1277 * tell the caller to replay the request. We
1278 * indicate this using -EAGAIN.
1279 * We replay the request because the device may
1280 * go away in the mean time.
1281 */
1282 rtnl_unlock();
1283 request_module(NET_SCH_ALIAS_PREFIX "%s", name);
1284 rtnl_lock();
1285 ops = qdisc_lookup_ops(kind);
1286 if (ops != NULL) {
1287 /* We will try again qdisc_lookup_ops,
1288 * so don't keep a reference.
1289 */
1290 module_put(ops->owner);
1291 err = -EAGAIN;
1292 goto err_out;
1293 }
1294 }
1295 }
1296#endif
1297
1298 err = -ENOENT;
1299 if (!ops) {
1300 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1301 goto err_out;
1302 }
1303
1304 sch = qdisc_alloc(dev_queue, ops, extack);
1305 if (IS_ERR(sch)) {
1306 err = PTR_ERR(sch);
1307 goto err_out2;
1308 }
1309
1310 sch->parent = parent;
1311
1312 if (handle == TC_H_INGRESS) {
1313 if (!(sch->flags & TCQ_F_INGRESS)) {
1314 NL_SET_ERR_MSG(extack,
1315 "Specified parent ID is reserved for ingress and clsact Qdiscs");
1316 err = -EINVAL;
1317 goto err_out3;
1318 }
1319 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1320 } else {
1321 if (handle == 0) {
1322 handle = qdisc_alloc_handle(dev);
1323 if (handle == 0) {
1324 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1325 err = -ENOSPC;
1326 goto err_out3;
1327 }
1328 }
1329 if (!netif_is_multiqueue(dev))
1330 sch->flags |= TCQ_F_ONETXQUEUE;
1331 }
1332
1333 sch->handle = handle;
1334
1335 /* This exist to keep backward compatible with a userspace
1336 * loophole, what allowed userspace to get IFF_NO_QUEUE
1337 * facility on older kernels by setting tx_queue_len=0 (prior
1338 * to qdisc init), and then forgot to reinit tx_queue_len
1339 * before again attaching a qdisc.
1340 */
1341 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1342 WRITE_ONCE(dev->tx_queue_len, DEFAULT_TX_QUEUE_LEN);
1343 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1344 }
1345
1346 err = qdisc_block_indexes_set(sch, tca, extack);
1347 if (err)
1348 goto err_out3;
1349
1350 if (tca[TCA_STAB]) {
1351 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1352 if (IS_ERR(stab)) {
1353 err = PTR_ERR(stab);
1354 goto err_out3;
1355 }
1356 rcu_assign_pointer(sch->stab, stab);
1357 }
1358
1359 if (ops->init) {
1360 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1361 if (err != 0)
1362 goto err_out4;
1363 }
1364
1365 if (tca[TCA_RATE]) {
1366 err = -EOPNOTSUPP;
1367 if (sch->flags & TCQ_F_MQROOT) {
1368 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1369 goto err_out4;
1370 }
1371
1372 err = gen_new_estimator(&sch->bstats,
1373 sch->cpu_bstats,
1374 &sch->rate_est,
1375 NULL,
1376 true,
1377 tca[TCA_RATE]);
1378 if (err) {
1379 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1380 goto err_out4;
1381 }
1382 }
1383
1384 qdisc_hash_add(sch, false);
1385 trace_qdisc_create(ops, dev, parent);
1386
1387 return sch;
1388
1389err_out4:
1390 /* Even if ops->init() failed, we call ops->destroy()
1391 * like qdisc_create_dflt().
1392 */
1393 if (ops->destroy)
1394 ops->destroy(sch);
1395 qdisc_put_stab(rtnl_dereference(sch->stab));
1396err_out3:
1397 lockdep_unregister_key(&sch->root_lock_key);
1398 netdev_put(dev, &sch->dev_tracker);
1399 qdisc_free(sch);
1400err_out2:
1401 module_put(ops->owner);
1402err_out:
1403 *errp = err;
1404 return NULL;
1405}
1406
1407static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1408 struct netlink_ext_ack *extack)
1409{
1410 struct qdisc_size_table *ostab, *stab = NULL;
1411 int err = 0;
1412
1413 if (tca[TCA_OPTIONS]) {
1414 if (!sch->ops->change) {
1415 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1416 return -EINVAL;
1417 }
1418 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1419 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1420 return -EOPNOTSUPP;
1421 }
1422 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1423 if (err)
1424 return err;
1425 }
1426
1427 if (tca[TCA_STAB]) {
1428 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1429 if (IS_ERR(stab))
1430 return PTR_ERR(stab);
1431 }
1432
1433 ostab = rtnl_dereference(sch->stab);
1434 rcu_assign_pointer(sch->stab, stab);
1435 qdisc_put_stab(ostab);
1436
1437 if (tca[TCA_RATE]) {
1438 /* NB: ignores errors from replace_estimator
1439 because change can't be undone. */
1440 if (sch->flags & TCQ_F_MQROOT)
1441 goto out;
1442 gen_replace_estimator(&sch->bstats,
1443 sch->cpu_bstats,
1444 &sch->rate_est,
1445 NULL,
1446 true,
1447 tca[TCA_RATE]);
1448 }
1449out:
1450 return 0;
1451}
1452
1453struct check_loop_arg {
1454 struct qdisc_walker w;
1455 struct Qdisc *p;
1456 int depth;
1457};
1458
1459static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1460 struct qdisc_walker *w);
1461
1462static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1463{
1464 struct check_loop_arg arg;
1465
1466 if (q->ops->cl_ops == NULL)
1467 return 0;
1468
1469 arg.w.stop = arg.w.skip = arg.w.count = 0;
1470 arg.w.fn = check_loop_fn;
1471 arg.depth = depth;
1472 arg.p = p;
1473 q->ops->cl_ops->walk(q, &arg.w);
1474 return arg.w.stop ? -ELOOP : 0;
1475}
1476
1477static int
1478check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1479{
1480 struct Qdisc *leaf;
1481 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1482 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1483
1484 leaf = cops->leaf(q, cl);
1485 if (leaf) {
1486 if (leaf == arg->p || arg->depth > 7)
1487 return -ELOOP;
1488 return check_loop(leaf, arg->p, arg->depth + 1);
1489 }
1490 return 0;
1491}
1492
1493const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1494 [TCA_KIND] = { .type = NLA_STRING },
1495 [TCA_RATE] = { .type = NLA_BINARY,
1496 .len = sizeof(struct tc_estimator) },
1497 [TCA_STAB] = { .type = NLA_NESTED },
1498 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1499 [TCA_CHAIN] = { .type = NLA_U32 },
1500 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1501 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1502};
1503
1504/*
1505 * Delete/get qdisc.
1506 */
1507
1508static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1509 struct netlink_ext_ack *extack)
1510{
1511 struct net *net = sock_net(skb->sk);
1512 struct tcmsg *tcm = nlmsg_data(n);
1513 struct nlattr *tca[TCA_MAX + 1];
1514 struct net_device *dev;
1515 u32 clid;
1516 struct Qdisc *q = NULL;
1517 struct Qdisc *p = NULL;
1518 int err;
1519
1520 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1521 rtm_tca_policy, extack);
1522 if (err < 0)
1523 return err;
1524
1525 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1526 if (!dev)
1527 return -ENODEV;
1528
1529 clid = tcm->tcm_parent;
1530 if (clid) {
1531 if (clid != TC_H_ROOT) {
1532 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1533 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1534 if (!p) {
1535 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1536 return -ENOENT;
1537 }
1538 q = qdisc_leaf(p, clid);
1539 } else if (dev_ingress_queue(dev)) {
1540 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1541 }
1542 } else {
1543 q = rtnl_dereference(dev->qdisc);
1544 }
1545 if (!q) {
1546 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1547 return -ENOENT;
1548 }
1549
1550 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1551 NL_SET_ERR_MSG(extack, "Invalid handle");
1552 return -EINVAL;
1553 }
1554 } else {
1555 q = qdisc_lookup(dev, tcm->tcm_handle);
1556 if (!q) {
1557 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1558 return -ENOENT;
1559 }
1560 }
1561
1562 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1563 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1564 return -EINVAL;
1565 }
1566
1567 if (n->nlmsg_type == RTM_DELQDISC) {
1568 if (!clid) {
1569 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1570 return -EINVAL;
1571 }
1572 if (q->handle == 0) {
1573 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1574 return -ENOENT;
1575 }
1576 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1577 if (err != 0)
1578 return err;
1579 } else {
1580 qdisc_get_notify(net, skb, n, clid, q, NULL);
1581 }
1582 return 0;
1583}
1584
1585static bool req_create_or_replace(struct nlmsghdr *n)
1586{
1587 return (n->nlmsg_flags & NLM_F_CREATE &&
1588 n->nlmsg_flags & NLM_F_REPLACE);
1589}
1590
1591static bool req_create_exclusive(struct nlmsghdr *n)
1592{
1593 return (n->nlmsg_flags & NLM_F_CREATE &&
1594 n->nlmsg_flags & NLM_F_EXCL);
1595}
1596
1597static bool req_change(struct nlmsghdr *n)
1598{
1599 return (!(n->nlmsg_flags & NLM_F_CREATE) &&
1600 !(n->nlmsg_flags & NLM_F_REPLACE) &&
1601 !(n->nlmsg_flags & NLM_F_EXCL));
1602}
1603
1604/*
1605 * Create/change qdisc.
1606 */
1607static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1608 struct netlink_ext_ack *extack)
1609{
1610 struct net *net = sock_net(skb->sk);
1611 struct tcmsg *tcm;
1612 struct nlattr *tca[TCA_MAX + 1];
1613 struct net_device *dev;
1614 u32 clid;
1615 struct Qdisc *q, *p;
1616 int err;
1617
1618replay:
1619 /* Reinit, just in case something touches this. */
1620 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1621 rtm_tca_policy, extack);
1622 if (err < 0)
1623 return err;
1624
1625 tcm = nlmsg_data(n);
1626 clid = tcm->tcm_parent;
1627 q = p = NULL;
1628
1629 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1630 if (!dev)
1631 return -ENODEV;
1632
1633
1634 if (clid) {
1635 if (clid != TC_H_ROOT) {
1636 if (clid != TC_H_INGRESS) {
1637 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1638 if (!p) {
1639 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1640 return -ENOENT;
1641 }
1642 q = qdisc_leaf(p, clid);
1643 } else if (dev_ingress_queue_create(dev)) {
1644 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1645 }
1646 } else {
1647 q = rtnl_dereference(dev->qdisc);
1648 }
1649
1650 /* It may be default qdisc, ignore it */
1651 if (q && q->handle == 0)
1652 q = NULL;
1653
1654 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1655 if (tcm->tcm_handle) {
1656 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1657 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1658 return -EEXIST;
1659 }
1660 if (TC_H_MIN(tcm->tcm_handle)) {
1661 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1662 return -EINVAL;
1663 }
1664 q = qdisc_lookup(dev, tcm->tcm_handle);
1665 if (!q)
1666 goto create_n_graft;
1667 if (q->parent != tcm->tcm_parent) {
1668 NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
1669 return -EINVAL;
1670 }
1671 if (n->nlmsg_flags & NLM_F_EXCL) {
1672 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1673 return -EEXIST;
1674 }
1675 if (tca[TCA_KIND] &&
1676 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1677 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1678 return -EINVAL;
1679 }
1680 if (q->flags & TCQ_F_INGRESS) {
1681 NL_SET_ERR_MSG(extack,
1682 "Cannot regraft ingress or clsact Qdiscs");
1683 return -EINVAL;
1684 }
1685 if (q == p ||
1686 (p && check_loop(q, p, 0))) {
1687 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1688 return -ELOOP;
1689 }
1690 if (clid == TC_H_INGRESS) {
1691 NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
1692 return -EINVAL;
1693 }
1694 qdisc_refcount_inc(q);
1695 goto graft;
1696 } else {
1697 if (!q)
1698 goto create_n_graft;
1699
1700 /* This magic test requires explanation.
1701 *
1702 * We know, that some child q is already
1703 * attached to this parent and have choice:
1704 * 1) change it or 2) create/graft new one.
1705 * If the requested qdisc kind is different
1706 * than the existing one, then we choose graft.
1707 * If they are the same then this is "change"
1708 * operation - just let it fallthrough..
1709 *
1710 * 1. We are allowed to create/graft only
1711 * if the request is explicitly stating
1712 * "please create if it doesn't exist".
1713 *
1714 * 2. If the request is to exclusive create
1715 * then the qdisc tcm_handle is not expected
1716 * to exist, so that we choose create/graft too.
1717 *
1718 * 3. The last case is when no flags are set.
1719 * This will happen when for example tc
1720 * utility issues a "change" command.
1721 * Alas, it is sort of hole in API, we
1722 * cannot decide what to do unambiguously.
1723 * For now we select create/graft.
1724 */
1725 if (tca[TCA_KIND] &&
1726 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1727 if (req_create_or_replace(n) ||
1728 req_create_exclusive(n))
1729 goto create_n_graft;
1730 else if (req_change(n))
1731 goto create_n_graft2;
1732 }
1733 }
1734 }
1735 } else {
1736 if (!tcm->tcm_handle) {
1737 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1738 return -EINVAL;
1739 }
1740 q = qdisc_lookup(dev, tcm->tcm_handle);
1741 }
1742
1743 /* Change qdisc parameters */
1744 if (!q) {
1745 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1746 return -ENOENT;
1747 }
1748 if (n->nlmsg_flags & NLM_F_EXCL) {
1749 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1750 return -EEXIST;
1751 }
1752 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1753 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1754 return -EINVAL;
1755 }
1756 err = qdisc_change(q, tca, extack);
1757 if (err == 0)
1758 qdisc_notify(net, skb, n, clid, NULL, q, extack);
1759 return err;
1760
1761create_n_graft:
1762 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1763 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1764 return -ENOENT;
1765 }
1766create_n_graft2:
1767 if (clid == TC_H_INGRESS) {
1768 if (dev_ingress_queue(dev)) {
1769 q = qdisc_create(dev, dev_ingress_queue(dev),
1770 tcm->tcm_parent, tcm->tcm_parent,
1771 tca, &err, extack);
1772 } else {
1773 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1774 err = -ENOENT;
1775 }
1776 } else {
1777 struct netdev_queue *dev_queue;
1778
1779 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1780 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1781 else if (p)
1782 dev_queue = p->dev_queue;
1783 else
1784 dev_queue = netdev_get_tx_queue(dev, 0);
1785
1786 q = qdisc_create(dev, dev_queue,
1787 tcm->tcm_parent, tcm->tcm_handle,
1788 tca, &err, extack);
1789 }
1790 if (q == NULL) {
1791 if (err == -EAGAIN)
1792 goto replay;
1793 return err;
1794 }
1795
1796graft:
1797 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1798 if (err) {
1799 if (q)
1800 qdisc_put(q);
1801 return err;
1802 }
1803
1804 return 0;
1805}
1806
1807static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1808 struct netlink_callback *cb,
1809 int *q_idx_p, int s_q_idx, bool recur,
1810 bool dump_invisible)
1811{
1812 int ret = 0, q_idx = *q_idx_p;
1813 struct Qdisc *q;
1814 int b;
1815
1816 if (!root)
1817 return 0;
1818
1819 q = root;
1820 if (q_idx < s_q_idx) {
1821 q_idx++;
1822 } else {
1823 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1824 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1825 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1826 RTM_NEWQDISC, NULL) <= 0)
1827 goto done;
1828 q_idx++;
1829 }
1830
1831 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1832 * itself has already been dumped.
1833 *
1834 * If we've already dumped the top-level (ingress) qdisc above and the global
1835 * qdisc hashtable, we don't want to hit it again
1836 */
1837 if (!qdisc_dev(root) || !recur)
1838 goto out;
1839
1840 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1841 if (q_idx < s_q_idx) {
1842 q_idx++;
1843 continue;
1844 }
1845 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1846 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1847 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1848 RTM_NEWQDISC, NULL) <= 0)
1849 goto done;
1850 q_idx++;
1851 }
1852
1853out:
1854 *q_idx_p = q_idx;
1855 return ret;
1856done:
1857 ret = -1;
1858 goto out;
1859}
1860
1861static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1862{
1863 struct net *net = sock_net(skb->sk);
1864 int idx, q_idx;
1865 int s_idx, s_q_idx;
1866 struct net_device *dev;
1867 const struct nlmsghdr *nlh = cb->nlh;
1868 struct nlattr *tca[TCA_MAX + 1];
1869 int err;
1870
1871 s_idx = cb->args[0];
1872 s_q_idx = q_idx = cb->args[1];
1873
1874 idx = 0;
1875 ASSERT_RTNL();
1876
1877 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1878 rtm_tca_policy, cb->extack);
1879 if (err < 0)
1880 return err;
1881
1882 for_each_netdev(net, dev) {
1883 struct netdev_queue *dev_queue;
1884
1885 if (idx < s_idx)
1886 goto cont;
1887 if (idx > s_idx)
1888 s_q_idx = 0;
1889 q_idx = 0;
1890
1891 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1892 skb, cb, &q_idx, s_q_idx,
1893 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1894 goto done;
1895
1896 dev_queue = dev_ingress_queue(dev);
1897 if (dev_queue &&
1898 tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
1899 skb, cb, &q_idx, s_q_idx, false,
1900 tca[TCA_DUMP_INVISIBLE]) < 0)
1901 goto done;
1902
1903cont:
1904 idx++;
1905 }
1906
1907done:
1908 cb->args[0] = idx;
1909 cb->args[1] = q_idx;
1910
1911 return skb->len;
1912}

/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl, u32 portid, u32 seq, u16 flags,
			  int event, struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
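
/* Note: tc_fill_tclass() primes both tcm_parent and tcm_handle with the
 * qdisc handle; the class ops' ->dump() callback is expected to overwrite
 * them with the real classid and parent of the class being dumped.
 */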

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_get_notify(struct net *net, struct sk_buff *oskb,
			     struct nlmsghdr *n, struct Qdisc *q,
			     unsigned long cl, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, RTM_NEWTCLASS,
			   extack) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
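
/* Unlike tclass_notify() above, there is no rtnl_notify_needed() shortcut
 * here: a GET request always has a caller waiting for the reply, so the
 * message must be built and sent unconditionally.
 */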

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl,
			     struct netlink_ext_ack *extack)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	if (rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb)
			return -ENOBUFS;

		if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
				   RTM_DELTCLASS, extack) < 0) {
			kfree_skb(skb);
			return -EINVAL;
		}
	} else {
		skb = NULL;
	}

	err = cops->delete(q, cl, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
				   n->nlmsg_flags & NLM_F_ECHO);
	return err;
}
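
/* The notification skb is filled in *before* ->delete() runs: once the
 * class is gone, its configuration and statistics can no longer be dumped,
 * so the message has to be captured while the class still exists.
 */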

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	unsigned long base;
	unsigned long cl;
	u32 classid;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (n && tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
		sch_tree_unlock(q);
	}
	return 0;
}

struct tc_bind_class_args {
	struct qdisc_walker w;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
};

static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
				struct qdisc_walker *w)
{
	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;

	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return 0;
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		struct tcf_proto *tp;

		for (tp = tcf_get_next_proto(chain, NULL);
		     tp; tp = tcf_get_next_proto(chain, tp)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = a->clid;
			arg.base = cl;
			arg.cl = a->new_cl;
			tp->ops->walk(tp, &arg.w, true);
		}
	}

	return 0;
}

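/* When a class is created or deleted, walk every filter attached to any
 * class of @q and rebind filters that point at @clid to @new_cl (the fresh
 * class on create, 0 on delete), so no filter keeps a stale class
 * reference.
 */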
static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tc_bind_class_args args = {};

	if (!cops->tcf_block)
		return;
	args.portid = portid;
	args.clid = clid;
	args.new_cl = new_cl;
	args.w.fn = tc_bind_class_walker;
	cops->walk(q, &args.w);
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is the qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is the qdisc.
	   handle == X:Y	 - fully specified.
	   handle == X:0	 - root class.
	 */
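
	/* Concrete illustration (values in hex): for class 1:10 under qdisc
	 * 1:, userspace sends tcm_parent == 0x00010000 and tcm_handle ==
	 * 0x00010010.  TC_H_MAJ() keeps the upper 16 bits (0x00010000),
	 * TC_H_MIN() the lower 16 (0x0010).
	 */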

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = rtnl_dereference(dev->qdisc)->handle;

		/* Now qid is a genuine qdisc handle, consistent with both
		 * parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = rtnl_dereference(dev->qdisc)->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes. */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
			/* Unbind the class from its filters by rebinding
			 * them to class 0.
			 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_get_notify(net, skb, n, q, cl, extack);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
		/* We just created a new class; rebind matching filters to it. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}
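
/* For orientation (hedged example): "tc class add dev eth0 parent 1:
 * classid 1:10 htb rate 1mbit" typically arrives here as RTM_NEWTCLASS
 * with NLM_F_CREATE | NLM_F_EXCL, tcm_parent == 0x00010000 and
 * tcm_handle == 0x00010010; cops->change() then creates the class.
 */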

struct qdisc_dump_args {
	struct qdisc_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS, NULL);
}

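/* Dump the classes of one qdisc.  *t_p counts qdiscs visited in this dump;
 * qdiscs below @s_t were completed by an earlier callback and are skipped,
 * while cb->args[1] resumes the class walk inside the qdisc where the
 * previous reply ran out of skb space.
 */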
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t, bool recur)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root) || !recur)
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

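/* Class dumps are per device: unlike RTM_GETQDISC dumps, the request must
 * carry a valid tcm_ifindex, and cb->args[0] remembers how many qdiscs of
 * that device have been walked so far.
 */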
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
				skb, tcm, cb, &t, s_t, true) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
				skb, tcm, cb, &t, s_t, false) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
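/* Example output (hedged; a typical system with high-resolution timers,
 * exact values are configuration dependent):
 *
 *	$ cat /proc/net/psched
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. 1000 ns per microsecond, 64 ns per PSCHED tick (PSCHED_SHIFT == 6),
 * a constant 1000000 retained for ABI compatibility, and 1 GHz timer
 * resolution.
 */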
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

#if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
#endif

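/* GET requests with NLM_F_DUMP set are steered by rtnetlink to the .dumpit
 * callbacks below; plain GETs and all NEW/DEL requests go through .doit.
 */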
static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc},
	{.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc},
	{.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc,
	 .dumpit = tc_dump_qdisc},
	{.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass},
	{.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass},
	{.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass,
	 .dumpit = tc_dump_tclass},
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register_many(psched_rtnl_msg_handlers);

	tc_wrapper_init();

	return 0;
}

subsys_initcall(pktsched_init);