/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the
   times determined by the algorithm hidden in it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the parts of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears
   all timers and counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
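
/* Illustrative sketch (not part of the original file): roughly how the
 * routines described above fit together for a hypothetical minimal FIFO
 * qdisc. The ex_-prefixed names are invented for this example; real
 * implementations live in the sch_*.c modules and are registered via
 * register_qdisc() below.
 *
 *	static int ex_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			      struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);	// -> NET_XMIT_DROP
 *	}
 *
 *	static struct sk_buff *ex_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);	// NULL when nothing to send
 *	}
 *
 *	static struct Qdisc_ops ex_qdisc_ops __read_mostly = {
 *		.id		= "ex_fifo",
 *		.enqueue	= ex_enqueue,
 *		.dequeue	= ex_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 */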

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
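
/* Typical (illustrative) module usage of the registration API above;
 * ex_qdisc_ops and the init/exit names are hypothetical for this sketch:
 *
 *	static int __init ex_module_init(void)
 *	{
 *		return register_qdisc(&ex_qdisc_ops);
 *	}
 *
 *	static void __exit ex_module_exit(void)
 *	{
 *		unregister_qdisc(&ex_qdisc_ops);
 *	}
 *
 *	module_init(ex_module_init);
 *	module_exit(ex_module_exit);
 */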

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif

/* We know the handle. Find the qdisc among all qdiscs attached to the
 * device (root qdisc, all its children, children of children, etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte
 * cell/entry, then rounding up to the next cell, calculating the
 * table entry one below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
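
/* Worked example (illustrative numbers, not from the original source):
 * with mpu = 100 and cell_log = 3, low = roundup(100, 48) = 144 and
 * high = roundup(145, 48) = 192, so cell_low = 144 >> 3 = 18 and
 * cell_high = (192 >> 3) - 1 = 23. If rtab[18] == rtab[23], several
 * distinct packet sizes map to the same cost, which only happens when
 * the table was built with 48-byte ATM cell alignment, so
 * TC_LINKLAYER_ATM is returned.
 */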

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
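
/* Worked example (illustrative numbers, not from the original source):
 * with overhead = 24, cell_align = 0, cell_log = 6 and size_log = 0, a
 * 500-byte skb gives pkt_len = 524 and slot = 524 >> 6 = 8, so the
 * accounted length becomes stab->data[8] (assuming tsize > 8). Slots
 * past the end of the table are extrapolated from the last entry, as
 * computed above.
 */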

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
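
/* Typical (illustrative) watchdog lifecycle in a shaping qdisc; the
 * field name q->watchdog is hypothetical for this sketch. The watchdog
 * is armed from ->dequeue() when the next packet is not yet allowed to
 * leave, and the timer reschedules the qdisc when it fires:
 *
 *	qdisc_watchdog_init(&q->watchdog, sch);		// in ->init()
 *	qdisc_watchdog_schedule_ns(&q->watchdog,
 *				   next_allowed_send_ns);	// in ->dequeue()
 *	qdisc_watchdog_cancel(&q->watchdog);		// in ->reset()/->destroy()
 */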

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}

void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify the parent qdisc only if the child qdisc becomes
		 * empty.
		 *
		 * If the child was empty even before the update, then the
		 * backlog counter is inconsistent and we skip the
		 * notification because the parent class is already passive.
		 *
		 * If the original child was offloaded, then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy the old qdisc.
 */
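
/* For orientation (an illustrative mapping, not from the original
 * source): "tc qdisc replace dev eth0 root handle 1: htb" takes the
 * parent == NULL path below and grafts onto the device's tx queues,
 * while "tc qdisc replace dev eth0 parent 1:10 sfq" resolves class
 * 1:10 via cops->find() and grafts through cops->graft().
 */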

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) &&
		    parent && !(parent->flags & TCQ_F_NOLOCK))
			new->flags &= ~TCQ_F_NOLOCK;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->find(parent, classid);

			if (cl) {
				err = cops->graft(parent, cl, new, &old,
						  extack);
			} else {
				NL_SET_ERR_MSG(extack, "Specified class not found");
				err = -ENOENT;
			}
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to keep backward compatibility with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinitialize
	 * tx_queue_len before attaching a qdisc again.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out5;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		seqcount_t *running;

		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		if (sch->parent != TC_H_ROOT &&
		    !(sch->flags & TCQ_F_INGRESS) &&
		    (!p || !(p->flags & TCQ_F_MQROOT)))
			running = qdisc_root_sleeping_running(sch);
		else
			running = &sch->running;

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					running,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and have a choice:
				 * either to change it or to create/graft a
				 * new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, the requestor wanted
				 * to say that the qdisc tcm_handle is not
				 * expected to exist, so we choose
				 * create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is a sort of hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft if the
				 * user gave a KIND which does not match the
				 * existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the
	 * global qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	u32 classid;
	unsigned long cl;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl);
		sch_tree_unlock(q);
	}
	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;
	unsigned long cl;

	cl = cops->find(q, portid);
	if (!cl)
		return;
	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return;
	list_for_each_entry(chain, &block->chain_list, list) {
		struct tcf_proto *tp;

		for (tp = rtnl_dereference(chain->filter_chain);
		     tp; tp = rtnl_dereference(tp->next)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = clid;
			arg.cl = new_cl;
			tp->ops->walk(tp, &arg.w);
		}
	}
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
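
	/* Concrete example (illustrative, not from the original source):
	 * "tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit"
	 * arrives with parent == 1:0 and handle == 1:10, so qid resolves
	 * to 1:0 below; with "parent 1:10 classid 1:20" the major of both
	 * is 1, which must match, and the new class nests under 1:10.
	 */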

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle consistent with
		 * both parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl);
			/* Unbind the class from filters by rebinding
			 * them to class 0
			 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class; do the reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.open	 = psched_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
2137
2138static int __init pktsched_init(void)
2139{
2140 int err;
2141
2142 err = register_pernet_subsys(&psched_net_ops);
2143 if (err) {
		pr_err("pktsched_init: cannot initialize per netns operations\n");
2146 return err;
2147 }
2148
2149 register_qdisc(&pfifo_fast_ops);
2150 register_qdisc(&pfifo_qdisc_ops);
2151 register_qdisc(&bfifo_qdisc_ops);
2152 register_qdisc(&pfifo_head_drop_qdisc_ops);
2153 register_qdisc(&mq_qdisc_ops);
2154 register_qdisc(&noqueue_qdisc_ops);
2155
2156 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2157 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2158 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2159 0);
2160 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2161 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2162 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2163 0);
2164
2165 return 0;
2166}
2167
2168subsys_initcall(pktsched_init);
/* Protects the list of registered TC modules. It is a pure SMP lock. */
117static DEFINE_RWLOCK(qdisc_mod_lock);
118
119
120/************************************************
121 * Queueing disciplines manipulation. *
122 ************************************************/
123
124
125/* The list of all installed queueing disciplines. */
126
127static struct Qdisc_ops *qdisc_base;
128
129/* Register/unregister queueing discipline */
130
131int register_qdisc(struct Qdisc_ops *qops)
132{
133 struct Qdisc_ops *q, **qp;
134 int rc = -EEXIST;
135
136 write_lock(&qdisc_mod_lock);
137 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 if (!strcmp(qops->id, q->id))
139 goto out;
140
141 if (qops->enqueue == NULL)
142 qops->enqueue = noop_qdisc_ops.enqueue;
143 if (qops->peek == NULL) {
144 if (qops->dequeue == NULL)
145 qops->peek = noop_qdisc_ops.peek;
146 else
147 goto out_einval;
148 }
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
151
152 if (qops->cl_ops) {
153 const struct Qdisc_class_ops *cops = qops->cl_ops;
154
155 if (!(cops->find && cops->walk && cops->leaf))
156 goto out_einval;
157
158 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
159 goto out_einval;
160 }
161
162 qops->next = NULL;
163 *qp = qops;
164 rc = 0;
165out:
166 write_unlock(&qdisc_mod_lock);
167 return rc;
168
169out_einval:
170 rc = -EINVAL;
171 goto out;
172}
173EXPORT_SYMBOL(register_qdisc);
174
175void unregister_qdisc(struct Qdisc_ops *qops)
176{
177 struct Qdisc_ops *q, **qp;
178 int err = -ENOENT;
179
180 write_lock(&qdisc_mod_lock);
181 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
182 if (q == qops)
183 break;
184 if (q) {
185 *qp = q->next;
186 q->next = NULL;
187 err = 0;
188 }
189 write_unlock(&qdisc_mod_lock);
190
191 WARN(err, "unregister qdisc(%s) failed\n", qops->id);
192}
193EXPORT_SYMBOL(unregister_qdisc);
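/*
 * A hypothetical module using the pair above; "example_qdisc_ops" and
 * the callbacks behind it are assumptions for illustration, not part
 * of this file. Note the invariants enforced by register_qdisc():
 * a duplicate ->id fails with -EEXIST, and supplying ->dequeue
 * without ->peek fails with -EINVAL.
 */
#if 0	/* example only */
static int __init example_sch_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_sch_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_sch_module_init);
module_exit(example_sch_module_exit);
#endif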
194
195/* Get default qdisc if not otherwise specified */
196void qdisc_get_default(char *name, size_t len)
197{
198 read_lock(&qdisc_mod_lock);
199 strscpy(name, default_qdisc_ops->id, len);
200 read_unlock(&qdisc_mod_lock);
201}
202
203static struct Qdisc_ops *qdisc_lookup_default(const char *name)
204{
205 struct Qdisc_ops *q = NULL;
206
207 for (q = qdisc_base; q; q = q->next) {
208 if (!strcmp(name, q->id)) {
209 if (!try_module_get(q->owner))
210 q = NULL;
211 break;
212 }
213 }
214
215 return q;
216}
217
218/* Set new default qdisc to use */
219int qdisc_set_default(const char *name)
220{
221 const struct Qdisc_ops *ops;
222
223 if (!capable(CAP_NET_ADMIN))
224 return -EPERM;
225
226 write_lock(&qdisc_mod_lock);
227 ops = qdisc_lookup_default(name);
228 if (!ops) {
229 /* Not found, drop lock and try to load module */
230 write_unlock(&qdisc_mod_lock);
231 request_module("sch_%s", name);
232 write_lock(&qdisc_mod_lock);
233
234 ops = qdisc_lookup_default(name);
235 }
236
237 if (ops) {
238 /* Set new default */
239 module_put(default_qdisc_ops->owner);
240 default_qdisc_ops = ops;
241 }
242 write_unlock(&qdisc_mod_lock);
243
244 return ops ? 0 : -ENOENT;
245}
246
247#ifdef CONFIG_NET_SCH_DEFAULT
248/* Set default value from kernel config */
249static int __init sch_default_qdisc(void)
250{
251 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
252}
253late_initcall(sch_default_qdisc);
254#endif
255
/* We know the handle. Find the qdisc among all qdiscs attached to the
 * device (the root qdisc, all its children, children of children, etc.).
 * Note: the caller must hold either the RTNL lock or rcu_read_lock().
 */
260
261static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
262{
263 struct Qdisc *q;
264
265 if (!qdisc_dev(root))
266 return (root->handle == handle ? root : NULL);
267
268 if (!(root->flags & TCQ_F_BUILTIN) &&
269 root->handle == handle)
270 return root;
271
272 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 lockdep_rtnl_is_held()) {
274 if (q->handle == handle)
275 return q;
276 }
277 return NULL;
278}
279
280void qdisc_hash_add(struct Qdisc *q, bool invisible)
281{
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 ASSERT_RTNL();
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 if (invisible)
286 q->flags |= TCQ_F_INVISIBLE;
287 }
288}
289EXPORT_SYMBOL(qdisc_hash_add);
290
291void qdisc_hash_del(struct Qdisc *q)
292{
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 ASSERT_RTNL();
295 hash_del_rcu(&q->hash);
296 }
297}
298EXPORT_SYMBOL(qdisc_hash_del);
299
300struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301{
302 struct Qdisc *q;
303
304 if (!handle)
305 return NULL;
306 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
307 if (q)
308 goto out;
309
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
313 handle);
314out:
315 return q;
316}
317
318struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
319{
320 struct netdev_queue *nq;
321 struct Qdisc *q;
322
323 if (!handle)
324 return NULL;
325 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
326 if (q)
327 goto out;
328
329 nq = dev_ingress_queue_rcu(dev);
330 if (nq)
331 q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
332 handle);
333out:
334 return q;
335}
336
337static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
338{
339 unsigned long cl;
340 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
341
342 if (cops == NULL)
343 return NULL;
344 cl = cops->find(p, classid);
345
346 if (cl == 0)
347 return NULL;
348 return cops->leaf(p, cl);
349}
350
351/* Find queueing discipline by name */
352
353static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
354{
355 struct Qdisc_ops *q = NULL;
356
357 if (kind) {
358 read_lock(&qdisc_mod_lock);
359 for (q = qdisc_base; q; q = q->next) {
360 if (nla_strcmp(kind, q->id) == 0) {
361 if (!try_module_get(q->owner))
362 q = NULL;
363 break;
364 }
365 }
366 read_unlock(&qdisc_mod_lock);
367 }
368 return q;
369}
370
/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by checking whether the
 * rate table was modified.
 *
 * For linklayer ATM table entries, the rate table is aligned to 48
 * bytes, so some table entries will contain the same value. The mpu
 * (min packet unit) is also encoded into the old rate table, so
 * starting from the mpu we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte
 * cell/entry, then rounding up to the next cell, computing the table
 * entry one below, and comparing.
 */
388static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
389{
390 int low = roundup(r->mpu, 48);
391 int high = roundup(low+1, 48);
392 int cell_low = low >> r->cell_log;
393 int cell_high = (high >> r->cell_log) - 1;
394
395 /* rtab is too inaccurate at rates > 100Mbit/s */
396 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
397 pr_debug("TC linklayer: Giving up ATM detection\n");
398 return TC_LINKLAYER_ETHERNET;
399 }
400
401 if ((cell_high > cell_low) && (cell_high < 256)
402 && (rtab[cell_low] == rtab[cell_high])) {
403 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
404 cell_low, cell_high, rtab[cell_high]);
405 return TC_LINKLAYER_ATM;
406 }
407 return TC_LINKLAYER_ETHERNET;
408}
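/*
 * Userspace sketch of the detection arithmetic above, with a toy rate
 * table; the 53-byte cell cost and the cell_log value are illustrative
 * assumptions. ATM rounds sizes up to 48-byte payload cells, so two
 * slots that fall inside the same cell share one table value.
 */
#if 0	/* example only */
#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int rtab[256], mpu = 0, cell_log = 3; /* 8-byte slots */
	int low, high, cell_low, cell_high, i;

	for (i = 0; i < 256; i++) {
		unsigned int bytes = (i + 1) << cell_log;

		rtab[i] = ROUNDUP(bytes, 48) / 48 * 53; /* toy ATM cost */
	}
	low = ROUNDUP(mpu, 48);
	high = ROUNDUP(low + 1, 48);
	cell_low = low >> cell_log;
	cell_high = (high >> cell_log) - 1;
	printf("ATM-like table: %s\n",
	       cell_high > cell_low && rtab[cell_low] == rtab[cell_high] ?
	       "yes" : "no");	/* prints "yes" for this toy table */
	return 0;
}
#endif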
409
410static struct qdisc_rate_table *qdisc_rtab_list;
411
412struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
413 struct nlattr *tab,
414 struct netlink_ext_ack *extack)
415{
416 struct qdisc_rate_table *rtab;
417
418 if (tab == NULL || r->rate == 0 ||
419 r->cell_log == 0 || r->cell_log >= 32 ||
420 nla_len(tab) != TC_RTAB_SIZE) {
421 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
422 return NULL;
423 }
424
425 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
426 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
427 !memcmp(&rtab->data, nla_data(tab), 1024)) {
428 rtab->refcnt++;
429 return rtab;
430 }
431 }
432
433 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
434 if (rtab) {
435 rtab->rate = *r;
436 rtab->refcnt = 1;
437 memcpy(rtab->data, nla_data(tab), 1024);
438 if (r->linklayer == TC_LINKLAYER_UNAWARE)
439 r->linklayer = __detect_linklayer(r, rtab->data);
440 rtab->next = qdisc_rtab_list;
441 qdisc_rtab_list = rtab;
442 } else {
443 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
444 }
445 return rtab;
446}
447EXPORT_SYMBOL(qdisc_get_rtab);
448
449void qdisc_put_rtab(struct qdisc_rate_table *tab)
450{
451 struct qdisc_rate_table *rtab, **rtabp;
452
453 if (!tab || --tab->refcnt)
454 return;
455
456 for (rtabp = &qdisc_rtab_list;
457 (rtab = *rtabp) != NULL;
458 rtabp = &rtab->next) {
459 if (rtab == tab) {
460 *rtabp = rtab->next;
461 kfree(rtab);
462 return;
463 }
464 }
465}
466EXPORT_SYMBOL(qdisc_put_rtab);
467
468static LIST_HEAD(qdisc_stab_list);
469
470static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
471 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
472 [TCA_STAB_DATA] = { .type = NLA_BINARY },
473};
474
475static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
476 struct netlink_ext_ack *extack)
477{
478 struct nlattr *tb[TCA_STAB_MAX + 1];
479 struct qdisc_size_table *stab;
480 struct tc_sizespec *s;
481 unsigned int tsize = 0;
482 u16 *tab = NULL;
483 int err;
484
485 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
486 extack);
487 if (err < 0)
488 return ERR_PTR(err);
489 if (!tb[TCA_STAB_BASE]) {
490 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
491 return ERR_PTR(-EINVAL);
492 }
493
494 s = nla_data(tb[TCA_STAB_BASE]);
495
496 if (s->tsize > 0) {
497 if (!tb[TCA_STAB_DATA]) {
498 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
499 return ERR_PTR(-EINVAL);
500 }
501 tab = nla_data(tb[TCA_STAB_DATA]);
502 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
503 }
504
505 if (tsize != s->tsize || (!tab && tsize > 0)) {
506 NL_SET_ERR_MSG(extack, "Invalid size of size table");
507 return ERR_PTR(-EINVAL);
508 }
509
510 list_for_each_entry(stab, &qdisc_stab_list, list) {
511 if (memcmp(&stab->szopts, s, sizeof(*s)))
512 continue;
513 if (tsize > 0 &&
514 memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
515 continue;
516 stab->refcnt++;
517 return stab;
518 }
519
520 if (s->size_log > STAB_SIZE_LOG_MAX ||
521 s->cell_log > STAB_SIZE_LOG_MAX) {
522 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
523 return ERR_PTR(-EINVAL);
524 }
525
526 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
527 if (!stab)
528 return ERR_PTR(-ENOMEM);
529
530 stab->refcnt = 1;
531 stab->szopts = *s;
532 if (tsize > 0)
533 memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
534
535 list_add_tail(&stab->list, &qdisc_stab_list);
536
537 return stab;
538}
539
540void qdisc_put_stab(struct qdisc_size_table *tab)
541{
542 if (!tab)
543 return;
544
545 if (--tab->refcnt == 0) {
546 list_del(&tab->list);
547 kfree_rcu(tab, rcu);
548 }
549}
550EXPORT_SYMBOL(qdisc_put_stab);
551
552static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
553{
554 struct nlattr *nest;
555
556 nest = nla_nest_start_noflag(skb, TCA_STAB);
557 if (nest == NULL)
558 goto nla_put_failure;
559 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
560 goto nla_put_failure;
561 nla_nest_end(skb, nest);
562
563 return skb->len;
564
565nla_put_failure:
566 return -1;
567}
568
569void __qdisc_calculate_pkt_len(struct sk_buff *skb,
570 const struct qdisc_size_table *stab)
571{
572 int pkt_len, slot;
573
574 pkt_len = skb->len + stab->szopts.overhead;
575 if (unlikely(!stab->szopts.tsize))
576 goto out;
577
578 slot = pkt_len + stab->szopts.cell_align;
579 if (unlikely(slot < 0))
580 slot = 0;
581
582 slot >>= stab->szopts.cell_log;
583 if (likely(slot < stab->szopts.tsize))
584 pkt_len = stab->data[slot];
585 else
586 pkt_len = stab->data[stab->szopts.tsize - 1] *
587 (slot / stab->szopts.tsize) +
588 stab->data[slot % stab->szopts.tsize];
589
590 pkt_len <<= stab->szopts.size_log;
591out:
592 if (unlikely(pkt_len < 1))
593 pkt_len = 1;
594 qdisc_skb_cb(skb)->pkt_len = pkt_len;
595}
596EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
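/*
 * Userspace sketch of the lookup above with made-up parameters
 * (overhead 24, 16-byte cells, size_log 0, cell_align 0); the table
 * contents are an assumption chosen so the effect is visible.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned short data[512];
	int overhead = 24, cell_log = 4, size_log = 0, tsize = 512;
	int pkt_len, slot, i;

	for (i = 0; i < tsize; i++)
		data[i] = (i + 1) << cell_log;	/* toy "wire size" table */

	pkt_len = 1000 + overhead;		/* skb->len + overhead */
	slot = pkt_len >> cell_log;
	if (slot < tsize)
		pkt_len = data[slot];
	else					/* extrapolate, as above */
		pkt_len = data[tsize - 1] * (slot / tsize) +
			  data[slot % tsize];
	pkt_len <<= size_log;
	if (pkt_len < 1)
		pkt_len = 1;
	printf("pkt_len=%d\n", pkt_len);	/* 1040 for this table */
	return 0;
}
#endif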
597
598void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
599{
600 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
601 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
602 txt, qdisc->ops->id, qdisc->handle >> 16);
603 qdisc->flags |= TCQ_F_WARN_NONWC;
604 }
605}
606EXPORT_SYMBOL(qdisc_warn_nonwc);
607
608static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
609{
610 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
611 timer);
612
613 rcu_read_lock();
614 __netif_schedule(qdisc_root(wd->qdisc));
615 rcu_read_unlock();
616
617 return HRTIMER_NORESTART;
618}
619
620void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
621 clockid_t clockid)
622{
623 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
624 wd->timer.function = qdisc_watchdog;
625 wd->qdisc = qdisc;
626}
627EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
628
629void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
630{
631 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
632}
633EXPORT_SYMBOL(qdisc_watchdog_init);
634
635void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
636 u64 delta_ns)
637{
638 bool deactivated;
639
640 rcu_read_lock();
641 deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
642 &qdisc_root_sleeping(wd->qdisc)->state);
643 rcu_read_unlock();
644 if (deactivated)
645 return;
646
647 if (hrtimer_is_queued(&wd->timer)) {
648 u64 softexpires;
649
650 softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
651 /* If timer is already set in [expires, expires + delta_ns],
652 * do not reprogram it.
653 */
654 if (softexpires - expires <= delta_ns)
655 return;
656 }
657
658 hrtimer_start_range_ns(&wd->timer,
659 ns_to_ktime(expires),
660 delta_ns,
661 HRTIMER_MODE_ABS_PINNED);
662}
663EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
664
665void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
666{
667 hrtimer_cancel(&wd->timer);
668}
669EXPORT_SYMBOL(qdisc_watchdog_cancel);
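/*
 * Typical lifecycle in a shaping qdisc (a sketch; "q->watchdog" and
 * "next_tx_time" are illustrative names). qdisc_watchdog_schedule_ns()
 * is the zero-slack wrapper around the range variant above:
 *
 *	qdisc_watchdog_init(&q->watchdog, sch);		in ->init()
 *	qdisc_watchdog_schedule_ns(&q->watchdog,	in ->dequeue(), when
 *				   next_tx_time);	nothing is due yet
 *	qdisc_watchdog_cancel(&q->watchdog);		in ->reset()/->destroy()
 *
 * When the hrtimer fires, qdisc_watchdog() above simply reschedules
 * the root qdisc so that ->dequeue() runs again.
 */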
670
671static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
672{
673 struct hlist_head *h;
674 unsigned int i;
675
676 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
677
678 if (h != NULL) {
679 for (i = 0; i < n; i++)
680 INIT_HLIST_HEAD(&h[i]);
681 }
682 return h;
683}
684
685void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
686{
687 struct Qdisc_class_common *cl;
688 struct hlist_node *next;
689 struct hlist_head *nhash, *ohash;
690 unsigned int nsize, nmask, osize;
691 unsigned int i, h;
692
693 /* Rehash when load factor exceeds 0.75 */
694 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
695 return;
696 nsize = clhash->hashsize * 2;
697 nmask = nsize - 1;
698 nhash = qdisc_class_hash_alloc(nsize);
699 if (nhash == NULL)
700 return;
701
702 ohash = clhash->hash;
703 osize = clhash->hashsize;
704
705 sch_tree_lock(sch);
706 for (i = 0; i < osize; i++) {
707 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
708 h = qdisc_class_hash(cl->classid, nmask);
709 hlist_add_head(&cl->hnode, &nhash[h]);
710 }
711 }
712 clhash->hash = nhash;
713 clhash->hashsize = nsize;
714 clhash->hashmask = nmask;
715 sch_tree_unlock(sch);
716
717 kvfree(ohash);
718}
719EXPORT_SYMBOL(qdisc_class_hash_grow);
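/*
 * The grow trigger above in isolation (a sketch): with the threshold
 * written as elems * 4 > size * 3, a table of size 4 stays put at
 * three classes (12 <= 12) and doubles when the fourth is inserted
 * (16 > 12), i.e. strictly above a 0.75 load factor.
 */
#if 0	/* example only */
static inline int clhash_should_grow(unsigned int elems, unsigned int size)
{
	return elems * 4 > size * 3;
}
#endif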
720
721int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
722{
723 unsigned int size = 4;
724
725 clhash->hash = qdisc_class_hash_alloc(size);
726 if (!clhash->hash)
727 return -ENOMEM;
728 clhash->hashsize = size;
729 clhash->hashmask = size - 1;
730 clhash->hashelems = 0;
731 return 0;
732}
733EXPORT_SYMBOL(qdisc_class_hash_init);
734
735void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
736{
737 kvfree(clhash->hash);
738}
739EXPORT_SYMBOL(qdisc_class_hash_destroy);
740
741void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
742 struct Qdisc_class_common *cl)
743{
744 unsigned int h;
745
746 INIT_HLIST_NODE(&cl->hnode);
747 h = qdisc_class_hash(cl->classid, clhash->hashmask);
748 hlist_add_head(&cl->hnode, &clhash->hash[h]);
749 clhash->hashelems++;
750}
751EXPORT_SYMBOL(qdisc_class_hash_insert);
752
753void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
754 struct Qdisc_class_common *cl)
755{
756 hlist_del(&cl->hnode);
757 clhash->hashelems--;
758}
759EXPORT_SYMBOL(qdisc_class_hash_remove);
760
/* Allocate a unique handle from the space managed by the kernel.
 * The possible range is [8000-FFFF]:0000 (0x8000 values).
 */
764static u32 qdisc_alloc_handle(struct net_device *dev)
765{
766 int i = 0x8000;
767 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
768
769 do {
770 autohandle += TC_H_MAKE(0x10000U, 0);
771 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
772 autohandle = TC_H_MAKE(0x80000000U, 0);
773 if (!qdisc_lookup(dev, autohandle))
774 return autohandle;
775 cond_resched();
776 } while (--i > 0);
777
778 return 0;
779}
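/*
 * The handle layout behind the walk above, in plain C. TC_H_MAJ(),
 * TC_H_MIN() and TC_H_MAKE() are the uapi macros from
 * <linux/pkt_sched.h>, reproduced here so the sketch is self-contained.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ(h)		((h) & 0xFFFF0000U)
#define TC_H_MIN(h)		((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)	(TC_H_MAJ(maj) | TC_H_MIN(min))

int main(void)
{
	uint32_t h = TC_H_MAKE(0x80000000U, 0);	/* start of auto range */

	h += TC_H_MAKE(0x10000U, 0);		/* next major number */
	printf("%x:%x\n", TC_H_MAJ(h) >> 16, TC_H_MIN(h)); /* 8001:0 */
	return 0;
}
#endif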
780
781void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
782{
783 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
784 const struct Qdisc_class_ops *cops;
785 unsigned long cl;
786 u32 parentid;
787 bool notify;
788 int drops;
789
790 if (n == 0 && len == 0)
791 return;
792 drops = max_t(int, n, 0);
793 rcu_read_lock();
794 while ((parentid = sch->parent)) {
795 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
796 break;
797
798 if (sch->flags & TCQ_F_NOPARENT)
799 break;
		/* Notify the parent qdisc only if the child qdisc becomes
		 * empty.
		 *
		 * If the child was empty even before the update, the backlog
		 * counter is inconsistent and we skip the notification,
		 * because the parent class is already passive.
		 *
		 * If the original child was offloaded, it is allowed to be
		 * seen as empty, so the parent is notified anyway.
		 */
809 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
810 !qdisc_is_offloaded);
811 /* TODO: perform the search on a per txq basis */
812 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
813 if (sch == NULL) {
814 WARN_ON_ONCE(parentid != TC_H_ROOT);
815 break;
816 }
817 cops = sch->ops->cl_ops;
818 if (notify && cops->qlen_notify) {
819 cl = cops->find(sch, parentid);
820 cops->qlen_notify(sch, cl);
821 }
822 sch->q.qlen -= n;
823 sch->qstats.backlog -= len;
824 __qdisc_qstats_drop(sch, drops);
825 }
826 rcu_read_unlock();
827}
828EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
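/*
 * Typical caller (a sketch): a classful qdisc that drops packets
 * outside its own enqueue path, e.g. while shrinking a limit in
 * ->change(), reports the removed packets and bytes so every
 * ancestor's qlen and backlog stay consistent:
 *
 *	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
 *
 * Only n == 0 && len == 0 short-circuits above; a pure byte-count
 * correction with n == 0 still walks the tree.
 */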
829
830int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
831 void *type_data)
832{
833 struct net_device *dev = qdisc_dev(sch);
834 int err;
835
836 sch->flags &= ~TCQ_F_OFFLOADED;
837 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
838 return 0;
839
840 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
841 if (err == -EOPNOTSUPP)
842 return 0;
843
844 if (!err)
845 sch->flags |= TCQ_F_OFFLOADED;
846
847 return err;
848}
849EXPORT_SYMBOL(qdisc_offload_dump_helper);
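/*
 * Sketch of a caller (the setup type and option struct depend on the
 * qdisc; TC_SETUP_QDISC_RED is one real example): invoked from the
 * qdisc's ->dump() path so that TCQ_F_OFFLOADED is re-evaluated on
 * every dump, with the driver's -EOPNOTSUPP mapped to "not offloaded"
 * rather than an error:
 *
 *	err = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &opt);
 */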
850
851void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
852 struct Qdisc *new, struct Qdisc *old,
853 enum tc_setup_type type, void *type_data,
854 struct netlink_ext_ack *extack)
855{
856 bool any_qdisc_is_offloaded;
857 int err;
858
859 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
860 return;
861
862 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
863
	/* Don't report an error if the graft is part of a destroy operation. */
865 if (!err || !new || new == &noop_qdisc)
866 return;
867
868 /* Don't report error if the parent, the old child and the new
869 * one are not offloaded.
870 */
871 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
872 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
873 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
874
875 if (any_qdisc_is_offloaded)
876 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
877}
878EXPORT_SYMBOL(qdisc_offload_graft_helper);
879
880void qdisc_offload_query_caps(struct net_device *dev,
881 enum tc_setup_type type,
882 void *caps, size_t caps_len)
883{
884 const struct net_device_ops *ops = dev->netdev_ops;
885 struct tc_query_caps_base base = {
886 .type = type,
887 .caps = caps,
888 };
889
890 memset(caps, 0, caps_len);
891
892 if (ops->ndo_setup_tc)
893 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
894}
895EXPORT_SYMBOL(qdisc_offload_query_caps);
896
897static void qdisc_offload_graft_root(struct net_device *dev,
898 struct Qdisc *new, struct Qdisc *old,
899 struct netlink_ext_ack *extack)
900{
901 struct tc_root_qopt_offload graft_offload = {
902 .command = TC_ROOT_GRAFT,
903 .handle = new ? new->handle : 0,
904 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
905 (old && old->flags & TCQ_F_INGRESS),
906 };
907
908 qdisc_offload_graft_helper(dev, NULL, new, old,
909 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
910}
911
912static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
913 u32 portid, u32 seq, u16 flags, int event,
914 struct netlink_ext_ack *extack)
915{
916 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
917 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
918 struct tcmsg *tcm;
919 struct nlmsghdr *nlh;
920 unsigned char *b = skb_tail_pointer(skb);
921 struct gnet_dump d;
922 struct qdisc_size_table *stab;
923 u32 block_index;
924 __u32 qlen;
925
926 cond_resched();
927 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
928 if (!nlh)
929 goto out_nlmsg_trim;
930 tcm = nlmsg_data(nlh);
931 tcm->tcm_family = AF_UNSPEC;
932 tcm->tcm__pad1 = 0;
933 tcm->tcm__pad2 = 0;
934 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
935 tcm->tcm_parent = clid;
936 tcm->tcm_handle = q->handle;
937 tcm->tcm_info = refcount_read(&q->refcnt);
938 if (nla_put_string(skb, TCA_KIND, q->ops->id))
939 goto nla_put_failure;
940 if (q->ops->ingress_block_get) {
941 block_index = q->ops->ingress_block_get(q);
942 if (block_index &&
943 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
944 goto nla_put_failure;
945 }
946 if (q->ops->egress_block_get) {
947 block_index = q->ops->egress_block_get(q);
948 if (block_index &&
949 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
950 goto nla_put_failure;
951 }
952 if (q->ops->dump && q->ops->dump(q, skb) < 0)
953 goto nla_put_failure;
954 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
955 goto nla_put_failure;
956 qlen = qdisc_qlen_sum(q);
957
958 stab = rtnl_dereference(q->stab);
959 if (stab && qdisc_dump_stab(skb, stab) < 0)
960 goto nla_put_failure;
961
962 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
963 NULL, &d, TCA_PAD) < 0)
964 goto nla_put_failure;
965
966 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
967 goto nla_put_failure;
968
969 if (qdisc_is_percpu_stats(q)) {
970 cpu_bstats = q->cpu_bstats;
971 cpu_qstats = q->cpu_qstats;
972 }
973
974 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
975 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
976 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
977 goto nla_put_failure;
978
979 if (gnet_stats_finish_copy(&d) < 0)
980 goto nla_put_failure;
981
982 if (extack && extack->_msg &&
983 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
984 goto out_nlmsg_trim;
985
986 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
987
988 return skb->len;
989
990out_nlmsg_trim:
991nla_put_failure:
992 nlmsg_trim(skb, b);
993 return -1;
994}
995
996static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
997{
998 if (q->flags & TCQ_F_BUILTIN)
999 return true;
1000 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
1001 return true;
1002
1003 return false;
1004}
1005
1006static int qdisc_get_notify(struct net *net, struct sk_buff *oskb,
1007 struct nlmsghdr *n, u32 clid, struct Qdisc *q,
1008 struct netlink_ext_ack *extack)
1009{
1010 struct sk_buff *skb;
1011 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1012
1013 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1014 if (!skb)
1015 return -ENOBUFS;
1016
1017 if (!tc_qdisc_dump_ignore(q, false)) {
1018 if (tc_fill_qdisc(skb, q, clid, portid, n->nlmsg_seq, 0,
1019 RTM_NEWQDISC, extack) < 0)
1020 goto err_out;
1021 }
1022
1023 if (skb->len)
1024 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1025 n->nlmsg_flags & NLM_F_ECHO);
1026
1027err_out:
1028 kfree_skb(skb);
1029 return -EINVAL;
1030}
1031
1032static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1033 struct nlmsghdr *n, u32 clid,
1034 struct Qdisc *old, struct Qdisc *new,
1035 struct netlink_ext_ack *extack)
1036{
1037 struct sk_buff *skb;
1038 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1039
1040 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
1041 return 0;
1042
1043 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1044 if (!skb)
1045 return -ENOBUFS;
1046
1047 if (old && !tc_qdisc_dump_ignore(old, false)) {
1048 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1049 0, RTM_DELQDISC, extack) < 0)
1050 goto err_out;
1051 }
1052 if (new && !tc_qdisc_dump_ignore(new, false)) {
1053 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1054 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
1055 goto err_out;
1056 }
1057
1058 if (skb->len)
1059 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1060 n->nlmsg_flags & NLM_F_ECHO);
1061
1062err_out:
1063 kfree_skb(skb);
1064 return -EINVAL;
1065}
1066
1067static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1068 struct nlmsghdr *n, u32 clid,
1069 struct Qdisc *old, struct Qdisc *new,
1070 struct netlink_ext_ack *extack)
1071{
1072 if (new || old)
1073 qdisc_notify(net, skb, n, clid, old, new, extack);
1074
1075 if (old)
1076 qdisc_put(old);
1077}
1078
1079static void qdisc_clear_nolock(struct Qdisc *sch)
1080{
1081 sch->flags &= ~TCQ_F_NOLOCK;
1082 if (!(sch->flags & TCQ_F_CPUSTATS))
1083 return;
1084
1085 free_percpu(sch->cpu_bstats);
1086 free_percpu(sch->cpu_qstats);
1087 sch->cpu_bstats = NULL;
1088 sch->cpu_qstats = NULL;
1089 sch->flags &= ~TCQ_F_CPUSTATS;
1090}
1091
1092/* Graft qdisc "new" to class "classid" of qdisc "parent" or
1093 * to device "dev".
1094 *
 * When appropriate, send a netlink notification using 'skb'
 * and 'n'.
1097 *
1098 * On success, destroy old qdisc.
1099 */
1100
1101static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1102 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1103 struct Qdisc *new, struct Qdisc *old,
1104 struct netlink_ext_ack *extack)
1105{
1106 struct Qdisc *q = old;
1107 struct net *net = dev_net(dev);
1108
1109 if (parent == NULL) {
1110 unsigned int i, num_q, ingress;
1111 struct netdev_queue *dev_queue;
1112
1113 ingress = 0;
1114 num_q = dev->num_tx_queues;
1115 if ((q && q->flags & TCQ_F_INGRESS) ||
1116 (new && new->flags & TCQ_F_INGRESS)) {
1117 ingress = 1;
1118 dev_queue = dev_ingress_queue(dev);
1119 if (!dev_queue) {
1120 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1121 return -ENOENT;
1122 }
1123
1124 q = rtnl_dereference(dev_queue->qdisc_sleeping);
1125
1126 /* This is the counterpart of that qdisc_refcount_inc_nz() call in
1127 * __tcf_qdisc_find() for filter requests.
1128 */
1129 if (!qdisc_refcount_dec_if_one(q)) {
1130 NL_SET_ERR_MSG(extack,
1131 "Current ingress or clsact Qdisc has ongoing filter requests");
1132 return -EBUSY;
1133 }
1134 }
1135
1136 if (dev->flags & IFF_UP)
1137 dev_deactivate(dev);
1138
1139 qdisc_offload_graft_root(dev, new, old, extack);
1140
1141 if (new && new->ops->attach && !ingress)
1142 goto skip;
1143
1144 if (!ingress) {
1145 for (i = 0; i < num_q; i++) {
1146 dev_queue = netdev_get_tx_queue(dev, i);
1147 old = dev_graft_qdisc(dev_queue, new);
1148
1149 if (new && i > 0)
1150 qdisc_refcount_inc(new);
1151 qdisc_put(old);
1152 }
1153 } else {
1154 old = dev_graft_qdisc(dev_queue, NULL);
1155
1156 /* {ingress,clsact}_destroy() @old before grafting @new to avoid
1157 * unprotected concurrent accesses to net_device::miniq_{in,e}gress
1158 * pointer(s) in mini_qdisc_pair_swap().
1159 */
1160 qdisc_notify(net, skb, n, classid, old, new, extack);
1161 qdisc_destroy(old);
1162
1163 dev_graft_qdisc(dev_queue, new);
1164 }
1165
1166skip:
1167 if (!ingress) {
1168 old = rtnl_dereference(dev->qdisc);
1169 if (new && !new->ops->attach)
1170 qdisc_refcount_inc(new);
1171 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1172
1173 notify_and_destroy(net, skb, n, classid, old, new, extack);
1174
1175 if (new && new->ops->attach)
1176 new->ops->attach(new);
1177 }
1178
1179 if (dev->flags & IFF_UP)
1180 dev_activate(dev);
1181 } else {
1182 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1183 unsigned long cl;
1184 int err;
1185
1186 /* Only support running class lockless if parent is lockless */
1187 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1188 qdisc_clear_nolock(new);
1189
1190 if (!cops || !cops->graft)
1191 return -EOPNOTSUPP;
1192
1193 cl = cops->find(parent, classid);
1194 if (!cl) {
1195 NL_SET_ERR_MSG(extack, "Specified class not found");
1196 return -ENOENT;
1197 }
1198
1199 if (new && new->ops == &noqueue_qdisc_ops) {
1200 NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1201 return -EINVAL;
1202 }
1203
1204 err = cops->graft(parent, cl, new, &old, extack);
1205 if (err)
1206 return err;
1207 notify_and_destroy(net, skb, n, classid, old, new, extack);
1208 }
1209 return 0;
1210}
1211
1212static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1213 struct netlink_ext_ack *extack)
1214{
1215 u32 block_index;
1216
1217 if (tca[TCA_INGRESS_BLOCK]) {
1218 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1219
1220 if (!block_index) {
1221 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1222 return -EINVAL;
1223 }
1224 if (!sch->ops->ingress_block_set) {
1225 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1226 return -EOPNOTSUPP;
1227 }
1228 sch->ops->ingress_block_set(sch, block_index);
1229 }
1230 if (tca[TCA_EGRESS_BLOCK]) {
1231 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1232
1233 if (!block_index) {
1234 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1235 return -EINVAL;
1236 }
1237 if (!sch->ops->egress_block_set) {
1238 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1239 return -EOPNOTSUPP;
1240 }
1241 sch->ops->egress_block_set(sch, block_index);
1242 }
1243 return 0;
1244}
1245
/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */
1251
1252static struct Qdisc *qdisc_create(struct net_device *dev,
1253 struct netdev_queue *dev_queue,
1254 u32 parent, u32 handle,
1255 struct nlattr **tca, int *errp,
1256 struct netlink_ext_ack *extack)
1257{
1258 int err;
1259 struct nlattr *kind = tca[TCA_KIND];
1260 struct Qdisc *sch;
1261 struct Qdisc_ops *ops;
1262 struct qdisc_size_table *stab;
1263
1264 ops = qdisc_lookup_ops(kind);
1265#ifdef CONFIG_MODULES
1266 if (ops == NULL && kind != NULL) {
1267 char name[IFNAMSIZ];
1268 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1269 /* We dropped the RTNL semaphore in order to
1270 * perform the module load. So, even if we
1271 * succeeded in loading the module we have to
1272 * tell the caller to replay the request. We
1273 * indicate this using -EAGAIN.
1274 * We replay the request because the device may
			 * go away in the meantime.
1276 */
1277 rtnl_unlock();
1278 request_module("sch_%s", name);
1279 rtnl_lock();
1280 ops = qdisc_lookup_ops(kind);
1281 if (ops != NULL) {
				/* qdisc_lookup_ops() will run again on
				 * replay, so don't keep a reference.
				 */
1285 module_put(ops->owner);
1286 err = -EAGAIN;
1287 goto err_out;
1288 }
1289 }
1290 }
1291#endif
1292
1293 err = -ENOENT;
1294 if (!ops) {
1295 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1296 goto err_out;
1297 }
1298
1299 sch = qdisc_alloc(dev_queue, ops, extack);
1300 if (IS_ERR(sch)) {
1301 err = PTR_ERR(sch);
1302 goto err_out2;
1303 }
1304
1305 sch->parent = parent;
1306
1307 if (handle == TC_H_INGRESS) {
1308 if (!(sch->flags & TCQ_F_INGRESS)) {
1309 NL_SET_ERR_MSG(extack,
1310 "Specified parent ID is reserved for ingress and clsact Qdiscs");
1311 err = -EINVAL;
1312 goto err_out3;
1313 }
1314 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1315 } else {
1316 if (handle == 0) {
1317 handle = qdisc_alloc_handle(dev);
1318 if (handle == 0) {
1319 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1320 err = -ENOSPC;
1321 goto err_out3;
1322 }
1323 }
1324 if (!netif_is_multiqueue(dev))
1325 sch->flags |= TCQ_F_ONETXQUEUE;
1326 }
1327
1328 sch->handle = handle;
1329
	/* This exists to stay backward compatible with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinit tx_queue_len
	 * before attaching a qdisc again.
	 */
1336 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1337 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1338 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1339 }
1340
1341 err = qdisc_block_indexes_set(sch, tca, extack);
1342 if (err)
1343 goto err_out3;
1344
1345 if (tca[TCA_STAB]) {
1346 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1347 if (IS_ERR(stab)) {
1348 err = PTR_ERR(stab);
1349 goto err_out3;
1350 }
1351 rcu_assign_pointer(sch->stab, stab);
1352 }
1353
1354 if (ops->init) {
1355 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1356 if (err != 0)
1357 goto err_out4;
1358 }
1359
1360 if (tca[TCA_RATE]) {
1361 err = -EOPNOTSUPP;
1362 if (sch->flags & TCQ_F_MQROOT) {
1363 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1364 goto err_out4;
1365 }
1366
1367 err = gen_new_estimator(&sch->bstats,
1368 sch->cpu_bstats,
1369 &sch->rate_est,
1370 NULL,
1371 true,
1372 tca[TCA_RATE]);
1373 if (err) {
1374 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1375 goto err_out4;
1376 }
1377 }
1378
1379 qdisc_hash_add(sch, false);
1380 trace_qdisc_create(ops, dev, parent);
1381
1382 return sch;
1383
1384err_out4:
1385 /* Even if ops->init() failed, we call ops->destroy()
1386 * like qdisc_create_dflt().
1387 */
1388 if (ops->destroy)
1389 ops->destroy(sch);
1390 qdisc_put_stab(rtnl_dereference(sch->stab));
1391err_out3:
1392 netdev_put(dev, &sch->dev_tracker);
1393 qdisc_free(sch);
1394err_out2:
1395 module_put(ops->owner);
1396err_out:
1397 *errp = err;
1398 return NULL;
1399}
1400
1401static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1402 struct netlink_ext_ack *extack)
1403{
1404 struct qdisc_size_table *ostab, *stab = NULL;
1405 int err = 0;
1406
1407 if (tca[TCA_OPTIONS]) {
1408 if (!sch->ops->change) {
1409 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1410 return -EINVAL;
1411 }
1412 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1413 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1414 return -EOPNOTSUPP;
1415 }
1416 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1417 if (err)
1418 return err;
1419 }
1420
1421 if (tca[TCA_STAB]) {
1422 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1423 if (IS_ERR(stab))
1424 return PTR_ERR(stab);
1425 }
1426
1427 ostab = rtnl_dereference(sch->stab);
1428 rcu_assign_pointer(sch->stab, stab);
1429 qdisc_put_stab(ostab);
1430
1431 if (tca[TCA_RATE]) {
1432 /* NB: ignores errors from replace_estimator
1433 because change can't be undone. */
1434 if (sch->flags & TCQ_F_MQROOT)
1435 goto out;
1436 gen_replace_estimator(&sch->bstats,
1437 sch->cpu_bstats,
1438 &sch->rate_est,
1439 NULL,
1440 true,
1441 tca[TCA_RATE]);
1442 }
1443out:
1444 return 0;
1445}
1446
1447struct check_loop_arg {
1448 struct qdisc_walker w;
1449 struct Qdisc *p;
1450 int depth;
1451};
1452
1453static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1454 struct qdisc_walker *w);
1455
1456static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1457{
1458 struct check_loop_arg arg;
1459
1460 if (q->ops->cl_ops == NULL)
1461 return 0;
1462
1463 arg.w.stop = arg.w.skip = arg.w.count = 0;
1464 arg.w.fn = check_loop_fn;
1465 arg.depth = depth;
1466 arg.p = p;
1467 q->ops->cl_ops->walk(q, &arg.w);
1468 return arg.w.stop ? -ELOOP : 0;
1469}
1470
1471static int
1472check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1473{
1474 struct Qdisc *leaf;
1475 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1476 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1477
1478 leaf = cops->leaf(q, cl);
1479 if (leaf) {
1480 if (leaf == arg->p || arg->depth > 7)
1481 return -ELOOP;
1482 return check_loop(leaf, arg->p, arg->depth + 1);
1483 }
1484 return 0;
1485}
1486
1487const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1488 [TCA_KIND] = { .type = NLA_STRING },
1489 [TCA_RATE] = { .type = NLA_BINARY,
1490 .len = sizeof(struct tc_estimator) },
1491 [TCA_STAB] = { .type = NLA_NESTED },
1492 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1493 [TCA_CHAIN] = { .type = NLA_U32 },
1494 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1495 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1496};
1497
1498/*
1499 * Delete/get qdisc.
1500 */
1501
1502static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1503 struct netlink_ext_ack *extack)
1504{
1505 struct net *net = sock_net(skb->sk);
1506 struct tcmsg *tcm = nlmsg_data(n);
1507 struct nlattr *tca[TCA_MAX + 1];
1508 struct net_device *dev;
1509 u32 clid;
1510 struct Qdisc *q = NULL;
1511 struct Qdisc *p = NULL;
1512 int err;
1513
1514 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1515 rtm_tca_policy, extack);
1516 if (err < 0)
1517 return err;
1518
1519 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1520 if (!dev)
1521 return -ENODEV;
1522
1523 clid = tcm->tcm_parent;
1524 if (clid) {
1525 if (clid != TC_H_ROOT) {
1526 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1527 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1528 if (!p) {
1529 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1530 return -ENOENT;
1531 }
1532 q = qdisc_leaf(p, clid);
1533 } else if (dev_ingress_queue(dev)) {
1534 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1535 }
1536 } else {
1537 q = rtnl_dereference(dev->qdisc);
1538 }
1539 if (!q) {
1540 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1541 return -ENOENT;
1542 }
1543
1544 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1545 NL_SET_ERR_MSG(extack, "Invalid handle");
1546 return -EINVAL;
1547 }
1548 } else {
1549 q = qdisc_lookup(dev, tcm->tcm_handle);
1550 if (!q) {
1551 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1552 return -ENOENT;
1553 }
1554 }
1555
1556 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1557 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1558 return -EINVAL;
1559 }
1560
1561 if (n->nlmsg_type == RTM_DELQDISC) {
1562 if (!clid) {
1563 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1564 return -EINVAL;
1565 }
1566 if (q->handle == 0) {
1567 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1568 return -ENOENT;
1569 }
1570 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1571 if (err != 0)
1572 return err;
1573 } else {
1574 qdisc_get_notify(net, skb, n, clid, q, NULL);
1575 }
1576 return 0;
1577}
1578
1579static bool req_create_or_replace(struct nlmsghdr *n)
1580{
1581 return (n->nlmsg_flags & NLM_F_CREATE &&
1582 n->nlmsg_flags & NLM_F_REPLACE);
1583}
1584
1585static bool req_create_exclusive(struct nlmsghdr *n)
1586{
1587 return (n->nlmsg_flags & NLM_F_CREATE &&
1588 n->nlmsg_flags & NLM_F_EXCL);
1589}
1590
1591static bool req_change(struct nlmsghdr *n)
1592{
1593 return (!(n->nlmsg_flags & NLM_F_CREATE) &&
1594 !(n->nlmsg_flags & NLM_F_REPLACE) &&
1595 !(n->nlmsg_flags & NLM_F_EXCL));
1596}
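/*
 * For orientation (based on how the iproute2 tc utility sets netlink
 * flags): "tc qdisc add" sends NLM_F_CREATE | NLM_F_EXCL, "replace"
 * sends NLM_F_CREATE | NLM_F_REPLACE, and "change" sends neither,
 * which is exactly what the three helpers above distinguish.
 */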
1597
1598/*
1599 * Create/change qdisc.
1600 */
1601static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1602 struct netlink_ext_ack *extack)
1603{
1604 struct net *net = sock_net(skb->sk);
1605 struct tcmsg *tcm;
1606 struct nlattr *tca[TCA_MAX + 1];
1607 struct net_device *dev;
1608 u32 clid;
1609 struct Qdisc *q, *p;
1610 int err;
1611
1612replay:
1613 /* Reinit, just in case something touches this. */
1614 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1615 rtm_tca_policy, extack);
1616 if (err < 0)
1617 return err;
1618
1619 tcm = nlmsg_data(n);
1620 clid = tcm->tcm_parent;
1621 q = p = NULL;
1622
1623 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1624 if (!dev)
1625 return -ENODEV;
1626
1627
1628 if (clid) {
1629 if (clid != TC_H_ROOT) {
1630 if (clid != TC_H_INGRESS) {
1631 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1632 if (!p) {
1633 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1634 return -ENOENT;
1635 }
1636 q = qdisc_leaf(p, clid);
1637 } else if (dev_ingress_queue_create(dev)) {
1638 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1639 }
1640 } else {
1641 q = rtnl_dereference(dev->qdisc);
1642 }
1643
		/* It may be the default qdisc; ignore it */
1645 if (q && q->handle == 0)
1646 q = NULL;
1647
1648 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1649 if (tcm->tcm_handle) {
1650 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1651 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1652 return -EEXIST;
1653 }
1654 if (TC_H_MIN(tcm->tcm_handle)) {
1655 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1656 return -EINVAL;
1657 }
1658 q = qdisc_lookup(dev, tcm->tcm_handle);
1659 if (!q)
1660 goto create_n_graft;
1661 if (n->nlmsg_flags & NLM_F_EXCL) {
1662 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1663 return -EEXIST;
1664 }
1665 if (tca[TCA_KIND] &&
1666 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1667 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1668 return -EINVAL;
1669 }
1670 if (q->flags & TCQ_F_INGRESS) {
1671 NL_SET_ERR_MSG(extack,
1672 "Cannot regraft ingress or clsact Qdiscs");
1673 return -EINVAL;
1674 }
1675 if (q == p ||
1676 (p && check_loop(q, p, 0))) {
1677 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1678 return -ELOOP;
1679 }
1680 if (clid == TC_H_INGRESS) {
1681 NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
1682 return -EINVAL;
1683 }
1684 qdisc_refcount_inc(q);
1685 goto graft;
1686 } else {
1687 if (!q)
1688 goto create_n_graft;
1689
				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and we have a
				 * choice: 1) change it or 2) create/graft a
				 * new one. If the requested qdisc kind is
				 * different from the existing one, then we
				 * choose graft. If they are the same, this
				 * is a "change" operation - just let it
				 * fall through.
				 *
				 * 1. We are allowed to create/graft only
				 * if the request explicitly states
				 * "please create if it doesn't exist".
				 *
				 * 2. If the request is an exclusive create,
				 * then the qdisc tcm_handle is not expected
				 * to exist, so we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * This happens when, for example, the tc
				 * utility issues a "change" command.
				 * Alas, it is a sort of hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft.
				 */
1715 if (tca[TCA_KIND] &&
1716 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1717 if (req_create_or_replace(n) ||
1718 req_create_exclusive(n))
1719 goto create_n_graft;
1720 else if (req_change(n))
1721 goto create_n_graft2;
1722 }
1723 }
1724 }
1725 } else {
1726 if (!tcm->tcm_handle) {
1727 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1728 return -EINVAL;
1729 }
1730 q = qdisc_lookup(dev, tcm->tcm_handle);
1731 }
1732
1733 /* Change qdisc parameters */
1734 if (!q) {
1735 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1736 return -ENOENT;
1737 }
1738 if (n->nlmsg_flags & NLM_F_EXCL) {
1739 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1740 return -EEXIST;
1741 }
1742 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1743 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1744 return -EINVAL;
1745 }
1746 err = qdisc_change(q, tca, extack);
1747 if (err == 0)
1748 qdisc_notify(net, skb, n, clid, NULL, q, extack);
1749 return err;
1750
1751create_n_graft:
1752 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1753 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1754 return -ENOENT;
1755 }
1756create_n_graft2:
1757 if (clid == TC_H_INGRESS) {
1758 if (dev_ingress_queue(dev)) {
1759 q = qdisc_create(dev, dev_ingress_queue(dev),
1760 tcm->tcm_parent, tcm->tcm_parent,
1761 tca, &err, extack);
1762 } else {
1763 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1764 err = -ENOENT;
1765 }
1766 } else {
1767 struct netdev_queue *dev_queue;
1768
1769 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1770 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1771 else if (p)
1772 dev_queue = p->dev_queue;
1773 else
1774 dev_queue = netdev_get_tx_queue(dev, 0);
1775
1776 q = qdisc_create(dev, dev_queue,
1777 tcm->tcm_parent, tcm->tcm_handle,
1778 tca, &err, extack);
1779 }
1780 if (q == NULL) {
1781 if (err == -EAGAIN)
1782 goto replay;
1783 return err;
1784 }
1785
1786graft:
1787 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1788 if (err) {
1789 if (q)
1790 qdisc_put(q);
1791 return err;
1792 }
1793
1794 return 0;
1795}
1796
1797static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1798 struct netlink_callback *cb,
1799 int *q_idx_p, int s_q_idx, bool recur,
1800 bool dump_invisible)
1801{
1802 int ret = 0, q_idx = *q_idx_p;
1803 struct Qdisc *q;
1804 int b;
1805
1806 if (!root)
1807 return 0;
1808
1809 q = root;
1810 if (q_idx < s_q_idx) {
1811 q_idx++;
1812 } else {
1813 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1814 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1815 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1816 RTM_NEWQDISC, NULL) <= 0)
1817 goto done;
1818 q_idx++;
1819 }
1820
	/* If dumping singletons, there is no qdisc_dev(root) and the
	 * singleton itself has already been dumped.
	 *
	 * If the top-level (ingress) qdisc was already dumped above and we
	 * are not recurring, skip the device's qdisc hashtable so nothing
	 * is hit twice.
	 */
1827 if (!qdisc_dev(root) || !recur)
1828 goto out;
1829
1830 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1831 if (q_idx < s_q_idx) {
1832 q_idx++;
1833 continue;
1834 }
1835 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1836 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1837 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1838 RTM_NEWQDISC, NULL) <= 0)
1839 goto done;
1840 q_idx++;
1841 }
1842
1843out:
1844 *q_idx_p = q_idx;
1845 return ret;
1846done:
1847 ret = -1;
1848 goto out;
1849}
1850
1851static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1852{
1853 struct net *net = sock_net(skb->sk);
1854 int idx, q_idx;
1855 int s_idx, s_q_idx;
1856 struct net_device *dev;
1857 const struct nlmsghdr *nlh = cb->nlh;
1858 struct nlattr *tca[TCA_MAX + 1];
1859 int err;
1860
1861 s_idx = cb->args[0];
1862 s_q_idx = q_idx = cb->args[1];
1863
1864 idx = 0;
1865 ASSERT_RTNL();
1866
1867 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1868 rtm_tca_policy, cb->extack);
1869 if (err < 0)
1870 return err;
1871
1872 for_each_netdev(net, dev) {
1873 struct netdev_queue *dev_queue;
1874
1875 if (idx < s_idx)
1876 goto cont;
1877 if (idx > s_idx)
1878 s_q_idx = 0;
1879 q_idx = 0;
1880
1881 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1882 skb, cb, &q_idx, s_q_idx,
1883 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1884 goto done;
1885
1886 dev_queue = dev_ingress_queue(dev);
1887 if (dev_queue &&
1888 tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
1889 skb, cb, &q_idx, s_q_idx, false,
1890 tca[TCA_DUMP_INVISIBLE]) < 0)
1891 goto done;
1892
1893cont:
1894 idx++;
1895 }
1896
1897done:
1898 cb->args[0] = idx;
1899 cb->args[1] = q_idx;
1900
1901 return skb->len;
1902}
1903
1904
1905
1906/************************************************
1907 * Traffic classes manipulation. *
1908 ************************************************/
1909
1910static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1911 unsigned long cl, u32 portid, u32 seq, u16 flags,
1912 int event, struct netlink_ext_ack *extack)
1913{
1914 struct tcmsg *tcm;
1915 struct nlmsghdr *nlh;
1916 unsigned char *b = skb_tail_pointer(skb);
1917 struct gnet_dump d;
1918 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1919
1920 cond_resched();
1921 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1922 if (!nlh)
1923 goto out_nlmsg_trim;
1924 tcm = nlmsg_data(nlh);
1925 tcm->tcm_family = AF_UNSPEC;
1926 tcm->tcm__pad1 = 0;
1927 tcm->tcm__pad2 = 0;
1928 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1929 tcm->tcm_parent = q->handle;
1930 tcm->tcm_handle = q->handle;
1931 tcm->tcm_info = 0;
1932 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1933 goto nla_put_failure;
1934 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1935 goto nla_put_failure;
1936
1937 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1938 NULL, &d, TCA_PAD) < 0)
1939 goto nla_put_failure;
1940
1941 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1942 goto nla_put_failure;
1943
1944 if (gnet_stats_finish_copy(&d) < 0)
1945 goto nla_put_failure;
1946
1947 if (extack && extack->_msg &&
1948 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1949 goto out_nlmsg_trim;
1950
1951 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1952
1953 return skb->len;
1954
1955out_nlmsg_trim:
1956nla_put_failure:
1957 nlmsg_trim(skb, b);
1958 return -1;
1959}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_get_notify(struct net *net, struct sk_buff *oskb,
			     struct nlmsghdr *n, struct Qdisc *q,
			     unsigned long cl, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, RTM_NEWTCLASS,
			   extack) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl,
			     struct netlink_ext_ack *extack)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	if (rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb)
			return -ENOBUFS;

		if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
				   RTM_DELTCLASS, extack) < 0) {
			kfree_skb(skb);
			return -EINVAL;
		}
	} else {
		skb = NULL;
	}

	err = cops->delete(q, cl, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
				   n->nlmsg_flags & NLM_F_ECHO);
	return err;
}
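
/* Note the ordering in tclass_del_notify(): the RTM_DELTCLASS skb is
 * filled in *before* cops->delete() runs, because once the class is
 * deleted the state that tc_fill_tclass() dumps is gone; the message
 * is only transmitted (rtnetlink_maybe_send) after the delete has
 * succeeded, and freed on failure.
 */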

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	unsigned long base;
	unsigned long cl;
	u32 classid;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (n && tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
		sch_tree_unlock(q);
	}
	return 0;
}

struct tc_bind_class_args {
	struct qdisc_walker w;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
};

static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
				struct qdisc_walker *w)
{
	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;

	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return 0;
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		struct tcf_proto *tp;

		for (tp = tcf_get_next_proto(chain, NULL);
		     tp; tp = tcf_get_next_proto(chain, tp)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = a->clid;
			arg.base = cl;
			arg.cl = a->new_cl;
			tp->ops->walk(tp, &arg.w, true);
		}
	}

	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tc_bind_class_args args = {};

	if (!cops->tcf_block)
		return;
	args.portid = portid;
	args.clid = clid;
	args.new_cl = new_cl;
	args.w.fn = tc_bind_class_walker;
	cops->walk(q, &args.w);
}
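
/* A minimal sketch of the nesting the walk above performs (pseudocode,
 * for illustration only): every class of q, every chain of that
 * class's tcf_block, every tcf_proto, every filter node, where each
 * node pointing at classid clid is rebound to the internal class
 * handle new_cl:
 *
 *	for each class cl in q:			// cops->walk()
 *		for each chain in tcf_block(cl):
 *			for each tp in chain:
 *				for each node n in tp:	// tp->ops->walk()
 *					bind_class(n, clid, new_cl)
 */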

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - the handle is fully specified.
	   handle == X:0	 - root class.
	 */
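
	/* For example (values hypothetical): a request with
	 * parent = 0x00010000 (1:0) and handle = 0x00010001 (1:1)
	 * names class 1:1 under the root class of qdisc 1:, while a
	 * request with parent = 0 and handle = 0x0000000a (0:a) leaves
	 * the major number to be completed from the device's root
	 * qdisc in Step 1 below.
	 */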

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = rtnl_dereference(dev->qdisc)->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = rtnl_dereference(dev->qdisc)->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
			/* Unbind the class from its filters by rebinding
			 * them to class 0.
			 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_get_notify(net, skb, n, q, cl, extack);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
		/* We just created a new class; do the reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}
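
/* From userspace this handler is typically reached through tc(8); for
 * example (device and parameters hypothetical):
 *
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit
 *
 * sends an RTM_NEWTCLASS message with NLM_F_CREATE, which arrives here
 * with tcm_parent = 1:0 and tcm_handle = 1:10 and is ultimately served
 * by the owning qdisc's cops->change().
 */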

struct qdisc_dump_args {
	struct qdisc_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS, NULL);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
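
/* Dump resume works through cb->args[]: args[0] counts qdiscs already
 * fully dumped (it becomes s_t on re-entry), and args[1] counts classes
 * already dumped within the qdisc whose walk overflowed the skb. A
 * sketch of the control flow across two dump callbacks (counts
 * illustrative):
 *
 *	call 1: qdisc #0 done; qdisc #1 stops after 50 classes
 *		-> args[0] = 1, args[1] = 50
 *	call 2: qdisc #0 skipped (*t_p < s_t); qdisc #1 walked again
 *		with w.skip = 50, dumping the remaining classes
 */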

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t, bool recur)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root) || !recur)
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
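
/* When the request pins a parent (tcm_parent != 0), only the qdisc
 * whose handle matches the parent's major is visited in addition to
 * the root; otherwise the walk covers every qdisc hashed on the
 * device's qdisc_hash.
 */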

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
				skb, tcm, cb, &t, s_t, true) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
				skb, tcm, cb, &t, s_t, false) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}
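
/* The four hex fields of /proc/net/psched are, per the seq_printf
 * above: NSEC_PER_USEC, nanoseconds per psched tick (PSCHED_TICKS2NS(1)),
 * the fixed constant 1000000, and NSEC_PER_SEC / hrtimer_resolution.
 * Userspace tc reads these to calibrate its time units. A typical read
 * might look like (output values illustrative, for a high-resolution
 * timer system):
 *
 *	$ cat /proc/net/psched
 *	000003e8 00000040 000f4240 3b9aca00
 */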

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

#if IS_ENABLED(CONFIG_RETPOLINE)
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
#endif

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	tc_wrapper_init();

	return 0;
}
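
/* The qdiscs registered here are the built-in set that must always be
 * available (pfifo_fast is the historical default, mq the default for
 * multiqueue devices); other qdiscs are loaded on demand via kmod.
 * The rtnl_register() calls wire the three qdisc and three class
 * message types to the handlers above, so e.g. an RTM_GETTCLASS dump
 * request is served by tc_dump_tclass().
 */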

subsys_initcall(pktsched_init);