1/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/skbuff.h>
24#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
29#include <linux/hrtimer.h>
30#include <linux/lockdep.h>
31#include <linux/slab.h>
32
33#include <net/net_namespace.h>
34#include <net/sock.h>
35#include <net/netlink.h>
36#include <net/pkt_sched.h>
37
38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
40 struct Qdisc *old, struct Qdisc *new);
41static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
44
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
55 Generally, a queueing discipline ("qdisc") is a black box,
56 which is able to enqueue packets and to dequeue them (when
57 the device is ready to send something) in an order and at times
58 determined by the algorithm hidden in it.
59
60 qdisc's are divided into two categories:
61 - "queues", which have no internal structure visible from outside.
62 - "schedulers", which split all the packets into "traffic classes",
63 using "packet classifiers" (look at cls_api.c)
64
65 In turn, classes may have child qdiscs (as a rule, queues)
66 attached to them etc. etc. etc.
67
68 The goal of the routines in this file is to translate
69 the information supplied by the user in the form of handles
70 into a form more intelligible to the kernel, to perform some sanity
71 checks and the part of the work that is common to all qdiscs,
72 and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
82 dequeue usually returns a skb to send. It is allowed to return NULL,
83 but that does not mean the queue is empty; it just means the
84 discipline does not want to send anything this time.
85 The queue is really empty if q->q.qlen == 0.
86 For complicated disciplines with multiple queues, q->q is not a
87 real packet queue, but q->q.qlen must nevertheless be valid.
88
89 ---enqueue
90
91 enqueue returns 0 if the packet was enqueued successfully.
92 If a packet (this one or another one) was dropped, it returns
93 a non-zero error code.
94 NET_XMIT_DROP - this packet was dropped.
95 Expected action: do not back off, but wait until the queue clears.
96 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
97 Expected action: back off or ignore.
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: back off or error to real-time apps.
100
101 Auxiliary routines:
102
103 ---peek
104
105 like dequeue but without removing a packet from the queue
106
107 ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
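/* Illustrative sketch (not from this file): a minimal qdisc module ties
 * the routines described above together by filling in a Qdisc_ops table
 * and registering it. Every "example_*" name below is a hypothetical
 * placeholder; only the field names and helpers are real.
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.init		= example_init,
 *		.reset		= example_reset,
 *		.destroy	= example_destroy,
 *		.change		= example_change,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The module would then call register_qdisc(&example_qdisc_ops) from its
 * init function and unregister_qdisc(&example_qdisc_ops) on exit.
 */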
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
138/* Register/unregister queueing discipline */
139
140int register_qdisc(struct Qdisc_ops *qops)
141{
142 struct Qdisc_ops *q, **qp;
143 int rc = -EEXIST;
144
145 write_lock(&qdisc_mod_lock);
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
147 if (!strcmp(qops->id, q->id))
148 goto out;
149
150 if (qops->enqueue == NULL)
151 qops->enqueue = noop_qdisc_ops.enqueue;
152 if (qops->peek == NULL) {
153 if (qops->dequeue == NULL)
154 qops->peek = noop_qdisc_ops.peek;
155 else
156 goto out_einval;
157 }
158 if (qops->dequeue == NULL)
159 qops->dequeue = noop_qdisc_ops.dequeue;
160
161 if (qops->cl_ops) {
162 const struct Qdisc_class_ops *cops = qops->cl_ops;
163
164 if (!(cops->get && cops->put && cops->walk && cops->leaf))
165 goto out_einval;
166
167 if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 goto out_einval;
169 }
170
171 qops->next = NULL;
172 *qp = qops;
173 rc = 0;
174out:
175 write_unlock(&qdisc_mod_lock);
176 return rc;
177
178out_einval:
179 rc = -EINVAL;
180 goto out;
181}
182EXPORT_SYMBOL(register_qdisc);
183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
201EXPORT_SYMBOL(unregister_qdisc);
202
203/* We know handle. Find qdisc among all qdisc's attached to device
204 (root qdisc, all its children, children of children etc.)
205 */
206
207static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
208{
209 struct Qdisc *q;
210
211 if (!(root->flags & TCQ_F_BUILTIN) &&
212 root->handle == handle)
213 return root;
214
215 list_for_each_entry(q, &root->list, list) {
216 if (q->handle == handle)
217 return q;
218 }
219 return NULL;
220}
221
222static void qdisc_list_add(struct Qdisc *q)
223{
224 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
225 list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
226}
227
228void qdisc_list_del(struct Qdisc *q)
229{
230 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
231 list_del(&q->list);
232}
233EXPORT_SYMBOL(qdisc_list_del);
234
235struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
236{
237 struct Qdisc *q;
238
239 q = qdisc_match_from_root(dev->qdisc, handle);
240 if (q)
241 goto out;
242
243 if (dev_ingress_queue(dev))
244 q = qdisc_match_from_root(
245 dev_ingress_queue(dev)->qdisc_sleeping,
246 handle);
247out:
248 return q;
249}
250
251static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
252{
253 unsigned long cl;
254 struct Qdisc *leaf;
255 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
256
257 if (cops == NULL)
258 return NULL;
259 cl = cops->get(p, classid);
260
261 if (cl == 0)
262 return NULL;
263 leaf = cops->leaf(p, cl);
264 cops->put(p, cl);
265 return leaf;
266}
267
268/* Find queueing discipline by name */
269
270static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
271{
272 struct Qdisc_ops *q = NULL;
273
274 if (kind) {
275 read_lock(&qdisc_mod_lock);
276 for (q = qdisc_base; q; q = q->next) {
277 if (nla_strcmp(kind, q->id) == 0) {
278 if (!try_module_get(q->owner))
279 q = NULL;
280 break;
281 }
282 }
283 read_unlock(&qdisc_mod_lock);
284 }
285 return q;
286}
287
288static struct qdisc_rate_table *qdisc_rtab_list;
289
290struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
291{
292 struct qdisc_rate_table *rtab;
293
294 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
295 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
296 rtab->refcnt++;
297 return rtab;
298 }
299 }
300
301 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
302 nla_len(tab) != TC_RTAB_SIZE)
303 return NULL;
304
305 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
306 if (rtab) {
307 rtab->rate = *r;
308 rtab->refcnt = 1;
309 memcpy(rtab->data, nla_data(tab), 1024);
310 rtab->next = qdisc_rtab_list;
311 qdisc_rtab_list = rtab;
312 }
313 return rtab;
314}
315EXPORT_SYMBOL(qdisc_get_rtab);
316
317void qdisc_put_rtab(struct qdisc_rate_table *tab)
318{
319 struct qdisc_rate_table *rtab, **rtabp;
320
321 if (!tab || --tab->refcnt)
322 return;
323
324 for (rtabp = &qdisc_rtab_list;
325 (rtab = *rtabp) != NULL;
326 rtabp = &rtab->next) {
327 if (rtab == tab) {
328 *rtabp = rtab->next;
329 kfree(rtab);
330 return;
331 }
332 }
333}
334EXPORT_SYMBOL(qdisc_put_rtab);
335
336static LIST_HEAD(qdisc_stab_list);
337static DEFINE_SPINLOCK(qdisc_stab_lock);
338
339static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
340 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
341 [TCA_STAB_DATA] = { .type = NLA_BINARY },
342};
343
344static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
345{
346 struct nlattr *tb[TCA_STAB_MAX + 1];
347 struct qdisc_size_table *stab;
348 struct tc_sizespec *s;
349 unsigned int tsize = 0;
350 u16 *tab = NULL;
351 int err;
352
353 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
354 if (err < 0)
355 return ERR_PTR(err);
356 if (!tb[TCA_STAB_BASE])
357 return ERR_PTR(-EINVAL);
358
359 s = nla_data(tb[TCA_STAB_BASE]);
360
361 if (s->tsize > 0) {
362 if (!tb[TCA_STAB_DATA])
363 return ERR_PTR(-EINVAL);
364 tab = nla_data(tb[TCA_STAB_DATA]);
365 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
366 }
367
368 if (tsize != s->tsize || (!tab && tsize > 0))
369 return ERR_PTR(-EINVAL);
370
371 spin_lock(&qdisc_stab_lock);
372
373 list_for_each_entry(stab, &qdisc_stab_list, list) {
374 if (memcmp(&stab->szopts, s, sizeof(*s)))
375 continue;
376 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
377 continue;
378 stab->refcnt++;
379 spin_unlock(&qdisc_stab_lock);
380 return stab;
381 }
382
383 spin_unlock(&qdisc_stab_lock);
384
385 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
386 if (!stab)
387 return ERR_PTR(-ENOMEM);
388
389 stab->refcnt = 1;
390 stab->szopts = *s;
391 if (tsize > 0)
392 memcpy(stab->data, tab, tsize * sizeof(u16));
393
394 spin_lock(&qdisc_stab_lock);
395 list_add_tail(&stab->list, &qdisc_stab_list);
396 spin_unlock(&qdisc_stab_lock);
397
398 return stab;
399}
400
401static void stab_kfree_rcu(struct rcu_head *head)
402{
403 kfree(container_of(head, struct qdisc_size_table, rcu));
404}
405
406void qdisc_put_stab(struct qdisc_size_table *tab)
407{
408 if (!tab)
409 return;
410
411 spin_lock(&qdisc_stab_lock);
412
413 if (--tab->refcnt == 0) {
414 list_del(&tab->list);
415 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
416 }
417
418 spin_unlock(&qdisc_stab_lock);
419}
420EXPORT_SYMBOL(qdisc_put_stab);
421
422static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
423{
424 struct nlattr *nest;
425
426 nest = nla_nest_start(skb, TCA_STAB);
427 if (nest == NULL)
428 goto nla_put_failure;
429 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
430 nla_nest_end(skb, nest);
431
432 return skb->len;
433
434nla_put_failure:
435 return -1;
436}
437
438void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
439{
440 int pkt_len, slot;
441
442 pkt_len = skb->len + stab->szopts.overhead;
443 if (unlikely(!stab->szopts.tsize))
444 goto out;
445
446 slot = pkt_len + stab->szopts.cell_align;
447 if (unlikely(slot < 0))
448 slot = 0;
449
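	/* Index into the size table in cells of 2^cell_log bytes; slots past
	 * the end of the table are extrapolated below from the last entry
	 * scaled by the overflow factor plus the wrapped-around entry.
	 */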
450 slot >>= stab->szopts.cell_log;
451 if (likely(slot < stab->szopts.tsize))
452 pkt_len = stab->data[slot];
453 else
454 pkt_len = stab->data[stab->szopts.tsize - 1] *
455 (slot / stab->szopts.tsize) +
456 stab->data[slot % stab->szopts.tsize];
457
458 pkt_len <<= stab->szopts.size_log;
459out:
460 if (unlikely(pkt_len < 1))
461 pkt_len = 1;
462 qdisc_skb_cb(skb)->pkt_len = pkt_len;
463}
464EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
465
466void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
467{
468 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
469 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
470 txt, qdisc->ops->id, qdisc->handle >> 16);
471 qdisc->flags |= TCQ_F_WARN_NONWC;
472 }
473}
474EXPORT_SYMBOL(qdisc_warn_nonwc);
475
476static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
477{
478 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
479 timer);
480
481 qdisc_unthrottled(wd->qdisc);
482 __netif_schedule(qdisc_root(wd->qdisc));
483
484 return HRTIMER_NORESTART;
485}
486
487void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
488{
489 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
490 wd->timer.function = qdisc_watchdog;
491 wd->qdisc = qdisc;
492}
493EXPORT_SYMBOL(qdisc_watchdog_init);
494
495void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
496{
497 ktime_t time;
498
499 if (test_bit(__QDISC_STATE_DEACTIVATED,
500 &qdisc_root_sleeping(wd->qdisc)->state))
501 return;
502
503 qdisc_throttled(wd->qdisc);
504 time = ktime_set(0, 0);
505 time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
506 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
507}
508EXPORT_SYMBOL(qdisc_watchdog_schedule);
509
510void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
511{
512 hrtimer_cancel(&wd->timer);
513 qdisc_unthrottled(wd->qdisc);
514}
515EXPORT_SYMBOL(qdisc_watchdog_cancel);
516
517static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
518{
519 unsigned int size = n * sizeof(struct hlist_head), i;
520 struct hlist_head *h;
521
522 if (size <= PAGE_SIZE)
523 h = kmalloc(size, GFP_KERNEL);
524 else
525 h = (struct hlist_head *)
526 __get_free_pages(GFP_KERNEL, get_order(size));
527
528 if (h != NULL) {
529 for (i = 0; i < n; i++)
530 INIT_HLIST_HEAD(&h[i]);
531 }
532 return h;
533}
534
535static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
536{
537 unsigned int size = n * sizeof(struct hlist_head);
538
539 if (size <= PAGE_SIZE)
540 kfree(h);
541 else
542 free_pages((unsigned long)h, get_order(size));
543}
544
545void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
546{
547 struct Qdisc_class_common *cl;
548 struct hlist_node *n, *next;
549 struct hlist_head *nhash, *ohash;
550 unsigned int nsize, nmask, osize;
551 unsigned int i, h;
552
553 /* Rehash when load factor exceeds 0.75 */
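	/* i.e. grow once hashelems / hashsize > 3/4, written here without
	 * a division as hashelems * 4 > hashsize * 3.
	 */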
554 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
555 return;
556 nsize = clhash->hashsize * 2;
557 nmask = nsize - 1;
558 nhash = qdisc_class_hash_alloc(nsize);
559 if (nhash == NULL)
560 return;
561
562 ohash = clhash->hash;
563 osize = clhash->hashsize;
564
565 sch_tree_lock(sch);
566 for (i = 0; i < osize; i++) {
567 hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
568 h = qdisc_class_hash(cl->classid, nmask);
569 hlist_add_head(&cl->hnode, &nhash[h]);
570 }
571 }
572 clhash->hash = nhash;
573 clhash->hashsize = nsize;
574 clhash->hashmask = nmask;
575 sch_tree_unlock(sch);
576
577 qdisc_class_hash_free(ohash, osize);
578}
579EXPORT_SYMBOL(qdisc_class_hash_grow);
580
581int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
582{
583 unsigned int size = 4;
584
585 clhash->hash = qdisc_class_hash_alloc(size);
586 if (clhash->hash == NULL)
587 return -ENOMEM;
588 clhash->hashsize = size;
589 clhash->hashmask = size - 1;
590 clhash->hashelems = 0;
591 return 0;
592}
593EXPORT_SYMBOL(qdisc_class_hash_init);
594
595void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
596{
597 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
598}
599EXPORT_SYMBOL(qdisc_class_hash_destroy);
600
601void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
602 struct Qdisc_class_common *cl)
603{
604 unsigned int h;
605
606 INIT_HLIST_NODE(&cl->hnode);
607 h = qdisc_class_hash(cl->classid, clhash->hashmask);
608 hlist_add_head(&cl->hnode, &clhash->hash[h]);
609 clhash->hashelems++;
610}
611EXPORT_SYMBOL(qdisc_class_hash_insert);
612
613void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
614 struct Qdisc_class_common *cl)
615{
616 hlist_del(&cl->hnode);
617 clhash->hashelems--;
618}
619EXPORT_SYMBOL(qdisc_class_hash_remove);
620
621/* Allocate a unique handle from the space managed by the kernel */
622
623static u32 qdisc_alloc_handle(struct net_device *dev)
624{
625 int i = 0x10000;
626 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
627
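	/* Handles allocated by the kernel live in the upper half of the
	 * major space (0x8000 and above); try at most 0x10000 candidate
	 * majors before giving up.
	 */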
628 do {
629 autohandle += TC_H_MAKE(0x10000U, 0);
630 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
631 autohandle = TC_H_MAKE(0x80000000U, 0);
632 } while (qdisc_lookup(dev, autohandle) && --i > 0);
633
634 return i > 0 ? autohandle : 0;
635}
636
637void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
638{
639 const struct Qdisc_class_ops *cops;
640 unsigned long cl;
641 u32 parentid;
642
643 if (n == 0)
644 return;
645 while ((parentid = sch->parent)) {
646 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
647 return;
648
649 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
650 if (sch == NULL) {
651 WARN_ON(parentid != TC_H_ROOT);
652 return;
653 }
654 cops = sch->ops->cl_ops;
655 if (cops->qlen_notify) {
656 cl = cops->get(sch, parentid);
657 cops->qlen_notify(sch, cl);
658 cops->put(sch, cl);
659 }
660 sch->q.qlen -= n;
661 }
662}
663EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
664
665static void notify_and_destroy(struct net *net, struct sk_buff *skb,
666 struct nlmsghdr *n, u32 clid,
667 struct Qdisc *old, struct Qdisc *new)
668{
669 if (new || old)
670 qdisc_notify(net, skb, n, clid, old, new);
671
672 if (old)
673 qdisc_destroy(old);
674}
675
676/* Graft qdisc "new" to class "classid" of qdisc "parent" or
677 * to device "dev".
678 *
679 * When appropriate, send a netlink notification using 'skb'
680 * and 'n'.
681 *
682 * On success, destroy old qdisc.
683 */
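/* Two cases below: with parent == NULL the new qdisc replaces the device
 * root (or ingress) qdisc on every tx queue; otherwise it is grafted into
 * the class "classid" of "parent" via cl_ops->graft().
 */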
684
685static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
686 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
687 struct Qdisc *new, struct Qdisc *old)
688{
689 struct Qdisc *q = old;
690 struct net *net = dev_net(dev);
691 int err = 0;
692
693 if (parent == NULL) {
694 unsigned int i, num_q, ingress;
695
696 ingress = 0;
697 num_q = dev->num_tx_queues;
698 if ((q && q->flags & TCQ_F_INGRESS) ||
699 (new && new->flags & TCQ_F_INGRESS)) {
700 num_q = 1;
701 ingress = 1;
702 if (!dev_ingress_queue(dev))
703 return -ENOENT;
704 }
705
706 if (dev->flags & IFF_UP)
707 dev_deactivate(dev);
708
709 if (new && new->ops->attach) {
710 new->ops->attach(new);
711 num_q = 0;
712 }
713
714 for (i = 0; i < num_q; i++) {
715 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
716
717 if (!ingress)
718 dev_queue = netdev_get_tx_queue(dev, i);
719
720 old = dev_graft_qdisc(dev_queue, new);
721 if (new && i > 0)
722 atomic_inc(&new->refcnt);
723
724 if (!ingress)
725 qdisc_destroy(old);
726 }
727
728 if (!ingress) {
729 notify_and_destroy(net, skb, n, classid,
730 dev->qdisc, new);
731 if (new && !new->ops->attach)
732 atomic_inc(&new->refcnt);
733 dev->qdisc = new ? : &noop_qdisc;
734 } else {
735 notify_and_destroy(net, skb, n, classid, old, new);
736 }
737
738 if (dev->flags & IFF_UP)
739 dev_activate(dev);
740 } else {
741 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
742
743 err = -EOPNOTSUPP;
744 if (cops && cops->graft) {
745 unsigned long cl = cops->get(parent, classid);
746 if (cl) {
747 err = cops->graft(parent, cl, new, &old);
748 cops->put(parent, cl);
749 } else
750 err = -ENOENT;
751 }
752 if (!err)
753 notify_and_destroy(net, skb, n, classid, old, new);
754 }
755 return err;
756}
757
758/* lockdep annotation is needed for ingress; egress gets it only for name */
759static struct lock_class_key qdisc_tx_lock;
760static struct lock_class_key qdisc_rx_lock;
761
762/*
763 Allocate and initialize new qdisc.
764
765 Parameters are passed via opt.
766 */
767
768static struct Qdisc *
769qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
770 struct Qdisc *p, u32 parent, u32 handle,
771 struct nlattr **tca, int *errp)
772{
773 int err;
774 struct nlattr *kind = tca[TCA_KIND];
775 struct Qdisc *sch;
776 struct Qdisc_ops *ops;
777 struct qdisc_size_table *stab;
778
779 ops = qdisc_lookup_ops(kind);
780#ifdef CONFIG_MODULES
781 if (ops == NULL && kind != NULL) {
782 char name[IFNAMSIZ];
783 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
784 /* We dropped the RTNL semaphore in order to
785 * perform the module load. So, even if we
786 * succeeded in loading the module we have to
787 * tell the caller to replay the request. We
788 * indicate this using -EAGAIN.
789 * We replay the request because the device may
790 * go away in the mean time.
791 */
792 rtnl_unlock();
793 request_module("sch_%s", name);
794 rtnl_lock();
795 ops = qdisc_lookup_ops(kind);
796 if (ops != NULL) {
797 /* We will try again qdisc_lookup_ops,
798 * so don't keep a reference.
799 */
800 module_put(ops->owner);
801 err = -EAGAIN;
802 goto err_out;
803 }
804 }
805 }
806#endif
807
808 err = -ENOENT;
809 if (ops == NULL)
810 goto err_out;
811
812 sch = qdisc_alloc(dev_queue, ops);
813 if (IS_ERR(sch)) {
814 err = PTR_ERR(sch);
815 goto err_out2;
816 }
817
818 sch->parent = parent;
819
820 if (handle == TC_H_INGRESS) {
821 sch->flags |= TCQ_F_INGRESS;
822 handle = TC_H_MAKE(TC_H_INGRESS, 0);
823 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
824 } else {
825 if (handle == 0) {
826 handle = qdisc_alloc_handle(dev);
827 err = -ENOMEM;
828 if (handle == 0)
829 goto err_out3;
830 }
831 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
832 }
833
834 sch->handle = handle;
835
836 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
837 if (tca[TCA_STAB]) {
838 stab = qdisc_get_stab(tca[TCA_STAB]);
839 if (IS_ERR(stab)) {
840 err = PTR_ERR(stab);
841 goto err_out4;
842 }
843 rcu_assign_pointer(sch->stab, stab);
844 }
845 if (tca[TCA_RATE]) {
846 spinlock_t *root_lock;
847
848 err = -EOPNOTSUPP;
849 if (sch->flags & TCQ_F_MQROOT)
850 goto err_out4;
851
852 if ((sch->parent != TC_H_ROOT) &&
853 !(sch->flags & TCQ_F_INGRESS) &&
854 (!p || !(p->flags & TCQ_F_MQROOT)))
855 root_lock = qdisc_root_sleeping_lock(sch);
856 else
857 root_lock = qdisc_lock(sch);
858
859 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
860 root_lock, tca[TCA_RATE]);
861 if (err)
862 goto err_out4;
863 }
864
865 qdisc_list_add(sch);
866
867 return sch;
868 }
869err_out3:
870 dev_put(dev);
871 kfree((char *) sch - sch->padded);
872err_out2:
873 module_put(ops->owner);
874err_out:
875 *errp = err;
876 return NULL;
877
878err_out4:
879 /*
880 * Any broken qdiscs that would require an ops->reset() here?
881 * The qdisc was never in action so it shouldn't be necessary.
882 */
883 qdisc_put_stab(rtnl_dereference(sch->stab));
884 if (ops->destroy)
885 ops->destroy(sch);
886 goto err_out3;
887}
888
889static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
890{
891 struct qdisc_size_table *ostab, *stab = NULL;
892 int err = 0;
893
894 if (tca[TCA_OPTIONS]) {
895 if (sch->ops->change == NULL)
896 return -EINVAL;
897 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
898 if (err)
899 return err;
900 }
901
902 if (tca[TCA_STAB]) {
903 stab = qdisc_get_stab(tca[TCA_STAB]);
904 if (IS_ERR(stab))
905 return PTR_ERR(stab);
906 }
907
908 ostab = rtnl_dereference(sch->stab);
909 rcu_assign_pointer(sch->stab, stab);
910 qdisc_put_stab(ostab);
911
912 if (tca[TCA_RATE]) {
913 /* NB: ignores errors from replace_estimator
914 because change can't be undone. */
915 if (sch->flags & TCQ_F_MQROOT)
916 goto out;
917 gen_replace_estimator(&sch->bstats, &sch->rate_est,
918 qdisc_root_sleeping_lock(sch),
919 tca[TCA_RATE]);
920 }
921out:
922 return 0;
923}
924
925struct check_loop_arg {
926 struct qdisc_walker w;
927 struct Qdisc *p;
928 int depth;
929};
930
931static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
932
933static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
934{
935 struct check_loop_arg arg;
936
937 if (q->ops->cl_ops == NULL)
938 return 0;
939
940 arg.w.stop = arg.w.skip = arg.w.count = 0;
941 arg.w.fn = check_loop_fn;
942 arg.depth = depth;
943 arg.p = p;
944 q->ops->cl_ops->walk(q, &arg.w);
945 return arg.w.stop ? -ELOOP : 0;
946}
947
948static int
949check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
950{
951 struct Qdisc *leaf;
952 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
953 struct check_loop_arg *arg = (struct check_loop_arg *)w;
954
955 leaf = cops->leaf(q, cl);
956 if (leaf) {
957 if (leaf == arg->p || arg->depth > 7)
958 return -ELOOP;
959 return check_loop(leaf, arg->p, arg->depth + 1);
960 }
961 return 0;
962}
963
964/*
965 * Delete/get qdisc.
966 */
967
968static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
969{
970 struct net *net = sock_net(skb->sk);
971 struct tcmsg *tcm = NLMSG_DATA(n);
972 struct nlattr *tca[TCA_MAX + 1];
973 struct net_device *dev;
974 u32 clid = tcm->tcm_parent;
975 struct Qdisc *q = NULL;
976 struct Qdisc *p = NULL;
977 int err;
978
979 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
980 if (!dev)
981 return -ENODEV;
982
983 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
984 if (err < 0)
985 return err;
986
987 if (clid) {
988 if (clid != TC_H_ROOT) {
989 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
990 p = qdisc_lookup(dev, TC_H_MAJ(clid));
991 if (!p)
992 return -ENOENT;
993 q = qdisc_leaf(p, clid);
994 } else if (dev_ingress_queue(dev)) {
995 q = dev_ingress_queue(dev)->qdisc_sleeping;
996 }
997 } else {
998 q = dev->qdisc;
999 }
1000 if (!q)
1001 return -ENOENT;
1002
1003 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1004 return -EINVAL;
1005 } else {
1006 q = qdisc_lookup(dev, tcm->tcm_handle);
1007 if (!q)
1008 return -ENOENT;
1009 }
1010
1011 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1012 return -EINVAL;
1013
1014 if (n->nlmsg_type == RTM_DELQDISC) {
1015 if (!clid)
1016 return -EINVAL;
1017 if (q->handle == 0)
1018 return -ENOENT;
1019 err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1020 if (err != 0)
1021 return err;
1022 } else {
1023 qdisc_notify(net, skb, n, clid, NULL, q);
1024 }
1025 return 0;
1026}
1027
1028/*
1029 * Create/change qdisc.
1030 */
1031
1032static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1033{
1034 struct net *net = sock_net(skb->sk);
1035 struct tcmsg *tcm;
1036 struct nlattr *tca[TCA_MAX + 1];
1037 struct net_device *dev;
1038 u32 clid;
1039 struct Qdisc *q, *p;
1040 int err;
1041
1042replay:
1043 /* Reinit, just in case something touches this. */
1044 tcm = NLMSG_DATA(n);
1045 clid = tcm->tcm_parent;
1046 q = p = NULL;
1047
1048 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1049 if (!dev)
1050 return -ENODEV;
1051
1052 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1053 if (err < 0)
1054 return err;
1055
1056 if (clid) {
1057 if (clid != TC_H_ROOT) {
1058 if (clid != TC_H_INGRESS) {
1059 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1060 if (!p)
1061 return -ENOENT;
1062 q = qdisc_leaf(p, clid);
1063 } else if (dev_ingress_queue_create(dev)) {
1064 q = dev_ingress_queue(dev)->qdisc_sleeping;
1065 }
1066 } else {
1067 q = dev->qdisc;
1068 }
1069
1070 /* It may be the default qdisc; ignore it */
1071 if (q && q->handle == 0)
1072 q = NULL;
1073
1074 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1075 if (tcm->tcm_handle) {
1076 if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1077 return -EEXIST;
1078 if (TC_H_MIN(tcm->tcm_handle))
1079 return -EINVAL;
1080 q = qdisc_lookup(dev, tcm->tcm_handle);
1081 if (!q)
1082 goto create_n_graft;
1083 if (n->nlmsg_flags & NLM_F_EXCL)
1084 return -EEXIST;
1085 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1086 return -EINVAL;
1087 if (q == p ||
1088 (p && check_loop(q, p, 0)))
1089 return -ELOOP;
1090 atomic_inc(&q->refcnt);
1091 goto graft;
1092 } else {
1093 if (!q)
1094 goto create_n_graft;
1095
1096 /* This magic test requires explanation.
1097 *
1098 * We know that some child q is already
1099 * attached to this parent and we have a choice:
1100 * either to change it or to create/graft a new one.
1101 *
1102 * 1. We are allowed to create/graft only
1103 * if CREATE and REPLACE flags are set.
1104 *
1105 * 2. If EXCL is set, the requestor wanted to say
1106 * that the qdisc with handle tcm_handle is not expected
1107 * to exist, so we choose create/graft too.
1108 *
1109 * 3. The last case is when no flags are set.
1110 * Alas, it is a sort of hole in the API; we
1111 * cannot decide what to do unambiguously.
1112 * For now we select create/graft if the
1113 * user gave a KIND which does not match the existing one.
1114 */
1115 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1116 (n->nlmsg_flags & NLM_F_REPLACE) &&
1117 ((n->nlmsg_flags & NLM_F_EXCL) ||
1118 (tca[TCA_KIND] &&
1119 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1120 goto create_n_graft;
1121 }
1122 }
1123 } else {
1124 if (!tcm->tcm_handle)
1125 return -EINVAL;
1126 q = qdisc_lookup(dev, tcm->tcm_handle);
1127 }
1128
1129 /* Change qdisc parameters */
1130 if (q == NULL)
1131 return -ENOENT;
1132 if (n->nlmsg_flags & NLM_F_EXCL)
1133 return -EEXIST;
1134 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1135 return -EINVAL;
1136 err = qdisc_change(q, tca);
1137 if (err == 0)
1138 qdisc_notify(net, skb, n, clid, NULL, q);
1139 return err;
1140
1141create_n_graft:
1142 if (!(n->nlmsg_flags & NLM_F_CREATE))
1143 return -ENOENT;
1144 if (clid == TC_H_INGRESS) {
1145 if (dev_ingress_queue(dev))
1146 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1147 tcm->tcm_parent, tcm->tcm_parent,
1148 tca, &err);
1149 else
1150 err = -ENOENT;
1151 } else {
1152 struct netdev_queue *dev_queue;
1153
1154 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1155 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1156 else if (p)
1157 dev_queue = p->dev_queue;
1158 else
1159 dev_queue = netdev_get_tx_queue(dev, 0);
1160
1161 q = qdisc_create(dev, dev_queue, p,
1162 tcm->tcm_parent, tcm->tcm_handle,
1163 tca, &err);
1164 }
1165 if (q == NULL) {
1166 if (err == -EAGAIN)
1167 goto replay;
1168 return err;
1169 }
1170
1171graft:
1172 err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1173 if (err) {
1174 if (q)
1175 qdisc_destroy(q);
1176 return err;
1177 }
1178
1179 return 0;
1180}
1181
1182static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1183 u32 pid, u32 seq, u16 flags, int event)
1184{
1185 struct tcmsg *tcm;
1186 struct nlmsghdr *nlh;
1187 unsigned char *b = skb_tail_pointer(skb);
1188 struct gnet_dump d;
1189 struct qdisc_size_table *stab;
1190
1191 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1192 tcm = NLMSG_DATA(nlh);
1193 tcm->tcm_family = AF_UNSPEC;
1194 tcm->tcm__pad1 = 0;
1195 tcm->tcm__pad2 = 0;
1196 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1197 tcm->tcm_parent = clid;
1198 tcm->tcm_handle = q->handle;
1199 tcm->tcm_info = atomic_read(&q->refcnt);
1200 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
1201 if (q->ops->dump && q->ops->dump(q, skb) < 0)
1202 goto nla_put_failure;
1203 q->qstats.qlen = q->q.qlen;
1204
1205 stab = rtnl_dereference(q->stab);
1206 if (stab && qdisc_dump_stab(skb, stab) < 0)
1207 goto nla_put_failure;
1208
1209 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1210 qdisc_root_sleeping_lock(q), &d) < 0)
1211 goto nla_put_failure;
1212
1213 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1214 goto nla_put_failure;
1215
1216 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
1217 gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1218 gnet_stats_copy_queue(&d, &q->qstats) < 0)
1219 goto nla_put_failure;
1220
1221 if (gnet_stats_finish_copy(&d) < 0)
1222 goto nla_put_failure;
1223
1224 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1225 return skb->len;
1226
1227nlmsg_failure:
1228nla_put_failure:
1229 nlmsg_trim(skb, b);
1230 return -1;
1231}
1232
1233static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1234{
1235 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1236}
1237
1238static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1239 struct nlmsghdr *n, u32 clid,
1240 struct Qdisc *old, struct Qdisc *new)
1241{
1242 struct sk_buff *skb;
1243 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1244
1245 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1246 if (!skb)
1247 return -ENOBUFS;
1248
1249 if (old && !tc_qdisc_dump_ignore(old)) {
1250 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
1251 0, RTM_DELQDISC) < 0)
1252 goto err_out;
1253 }
1254 if (new && !tc_qdisc_dump_ignore(new)) {
1255 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
1256 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1257 goto err_out;
1258 }
1259
1260 if (skb->len)
1261 return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
1262 n->nlmsg_flags & NLM_F_ECHO);
1263
1264err_out:
1265 kfree_skb(skb);
1266 return -EINVAL;
1267}
1268
1269static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1270 struct netlink_callback *cb,
1271 int *q_idx_p, int s_q_idx)
1272{
1273 int ret = 0, q_idx = *q_idx_p;
1274 struct Qdisc *q;
1275
1276 if (!root)
1277 return 0;
1278
1279 q = root;
1280 if (q_idx < s_q_idx) {
1281 q_idx++;
1282 } else {
1283 if (!tc_qdisc_dump_ignore(q) &&
1284 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1285 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1286 goto done;
1287 q_idx++;
1288 }
1289 list_for_each_entry(q, &root->list, list) {
1290 if (q_idx < s_q_idx) {
1291 q_idx++;
1292 continue;
1293 }
1294 if (!tc_qdisc_dump_ignore(q) &&
1295 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1296 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1297 goto done;
1298 q_idx++;
1299 }
1300
1301out:
1302 *q_idx_p = q_idx;
1303 return ret;
1304done:
1305 ret = -1;
1306 goto out;
1307}
1308
1309static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1310{
1311 struct net *net = sock_net(skb->sk);
1312 int idx, q_idx;
1313 int s_idx, s_q_idx;
1314 struct net_device *dev;
1315
1316 s_idx = cb->args[0];
1317 s_q_idx = q_idx = cb->args[1];
1318
1319 rcu_read_lock();
1320 idx = 0;
1321 for_each_netdev_rcu(net, dev) {
1322 struct netdev_queue *dev_queue;
1323
1324 if (idx < s_idx)
1325 goto cont;
1326 if (idx > s_idx)
1327 s_q_idx = 0;
1328 q_idx = 0;
1329
1330 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1331 goto done;
1332
1333 dev_queue = dev_ingress_queue(dev);
1334 if (dev_queue &&
1335 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1336 &q_idx, s_q_idx) < 0)
1337 goto done;
1338
1339cont:
1340 idx++;
1341 }
1342
1343done:
1344 rcu_read_unlock();
1345
1346 cb->args[0] = idx;
1347 cb->args[1] = q_idx;
1348
1349 return skb->len;
1350}
1351
1352
1353
1354/************************************************
1355 * Traffic classes manipulation. *
1356 ************************************************/
1357
1358
1359
1360static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1361{
1362 struct net *net = sock_net(skb->sk);
1363 struct tcmsg *tcm = NLMSG_DATA(n);
1364 struct nlattr *tca[TCA_MAX + 1];
1365 struct net_device *dev;
1366 struct Qdisc *q = NULL;
1367 const struct Qdisc_class_ops *cops;
1368 unsigned long cl = 0;
1369 unsigned long new_cl;
1370 u32 pid = tcm->tcm_parent;
1371 u32 clid = tcm->tcm_handle;
1372 u32 qid = TC_H_MAJ(clid);
1373 int err;
1374
1375 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1376 if (!dev)
1377 return -ENODEV;
1378
1379 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1380 if (err < 0)
1381 return err;
1382
1383 /*
1384 parent == TC_H_UNSPEC - unspecified parent.
1385 parent == TC_H_ROOT - class is root, which has no parent.
1386 parent == X:0 - parent is root class.
1387 parent == X:Y - parent is a node in hierarchy.
1388 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1389
1390 handle == 0:0 - generate handle from kernel pool.
1391 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1392 handle == X:Y - clear.
1393 handle == X:0 - root class.
1394 */
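	/* Worked example (illustrative): "tc class add dev eth0 parent 1:1
	 * classid 1:10 ..." (tc reads majors and minors as hex) arrives
	 * with tcm_parent == 0x00010001 and tcm_handle == 0x00010010, so
	 * qid resolves to qdisc 1:0 and both majors agree, which is exactly
	 * what the checks below enforce.
	 */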
1395
1396 /* Step 1. Determine qdisc handle X:0 */
1397
1398 if (pid != TC_H_ROOT) {
1399 u32 qid1 = TC_H_MAJ(pid);
1400
1401 if (qid && qid1) {
1402 /* If both majors are known, they must be identical. */
1403 if (qid != qid1)
1404 return -EINVAL;
1405 } else if (qid1) {
1406 qid = qid1;
1407 } else if (qid == 0)
1408 qid = dev->qdisc->handle;
1409
1410 /* Now qid is a genuine qdisc handle consistent
1411 * with both parent and child.
1412 *
1413 * TC_H_MAJ(pid) still may be unspecified, complete it now.
1414 */
1415 if (pid)
1416 pid = TC_H_MAKE(qid, pid);
1417 } else {
1418 if (qid == 0)
1419 qid = dev->qdisc->handle;
1420 }
1421
1422 /* OK. Locate qdisc */
1423 q = qdisc_lookup(dev, qid);
1424 if (!q)
1425 return -ENOENT;
1426
1427 /* And check that it supports classes */
1428 cops = q->ops->cl_ops;
1429 if (cops == NULL)
1430 return -EINVAL;
1431
1432 /* Now try to get class */
1433 if (clid == 0) {
1434 if (pid == TC_H_ROOT)
1435 clid = qid;
1436 } else
1437 clid = TC_H_MAKE(qid, clid);
1438
1439 if (clid)
1440 cl = cops->get(q, clid);
1441
1442 if (cl == 0) {
1443 err = -ENOENT;
1444 if (n->nlmsg_type != RTM_NEWTCLASS ||
1445 !(n->nlmsg_flags & NLM_F_CREATE))
1446 goto out;
1447 } else {
1448 switch (n->nlmsg_type) {
1449 case RTM_NEWTCLASS:
1450 err = -EEXIST;
1451 if (n->nlmsg_flags & NLM_F_EXCL)
1452 goto out;
1453 break;
1454 case RTM_DELTCLASS:
1455 err = -EOPNOTSUPP;
1456 if (cops->delete)
1457 err = cops->delete(q, cl);
1458 if (err == 0)
1459 tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1460 goto out;
1461 case RTM_GETTCLASS:
1462 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1463 goto out;
1464 default:
1465 err = -EINVAL;
1466 goto out;
1467 }
1468 }
1469
1470 new_cl = cl;
1471 err = -EOPNOTSUPP;
1472 if (cops->change)
1473 err = cops->change(q, clid, pid, tca, &new_cl);
1474 if (err == 0)
1475 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1476
1477out:
1478 if (cl)
1479 cops->put(q, cl);
1480
1481 return err;
1482}
1483
1484
1485static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1486 unsigned long cl,
1487 u32 pid, u32 seq, u16 flags, int event)
1488{
1489 struct tcmsg *tcm;
1490 struct nlmsghdr *nlh;
1491 unsigned char *b = skb_tail_pointer(skb);
1492 struct gnet_dump d;
1493 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1494
1495 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1496 tcm = NLMSG_DATA(nlh);
1497 tcm->tcm_family = AF_UNSPEC;
1498 tcm->tcm__pad1 = 0;
1499 tcm->tcm__pad2 = 0;
1500 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1501 tcm->tcm_parent = q->handle;
1502 tcm->tcm_handle = q->handle;
1503 tcm->tcm_info = 0;
1504 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
1505 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1506 goto nla_put_failure;
1507
1508 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1509 qdisc_root_sleeping_lock(q), &d) < 0)
1510 goto nla_put_failure;
1511
1512 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1513 goto nla_put_failure;
1514
1515 if (gnet_stats_finish_copy(&d) < 0)
1516 goto nla_put_failure;
1517
1518 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1519 return skb->len;
1520
1521nlmsg_failure:
1522nla_put_failure:
1523 nlmsg_trim(skb, b);
1524 return -1;
1525}
1526
1527static int tclass_notify(struct net *net, struct sk_buff *oskb,
1528 struct nlmsghdr *n, struct Qdisc *q,
1529 unsigned long cl, int event)
1530{
1531 struct sk_buff *skb;
1532 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1533
1534 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1535 if (!skb)
1536 return -ENOBUFS;
1537
1538 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1539 kfree_skb(skb);
1540 return -EINVAL;
1541 }
1542
1543 return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
1544 n->nlmsg_flags & NLM_F_ECHO);
1545}
1546
1547struct qdisc_dump_args {
1548 struct qdisc_walker w;
1549 struct sk_buff *skb;
1550 struct netlink_callback *cb;
1551};
1552
1553static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1554{
1555 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1556
1557 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1558 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1559}
1560
1561static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1562 struct tcmsg *tcm, struct netlink_callback *cb,
1563 int *t_p, int s_t)
1564{
1565 struct qdisc_dump_args arg;
1566
1567 if (tc_qdisc_dump_ignore(q) ||
1568 *t_p < s_t || !q->ops->cl_ops ||
1569 (tcm->tcm_parent &&
1570 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1571 (*t_p)++;
1572 return 0;
1573 }
1574 if (*t_p > s_t)
1575 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1576 arg.w.fn = qdisc_class_dump;
1577 arg.skb = skb;
1578 arg.cb = cb;
1579 arg.w.stop = 0;
1580 arg.w.skip = cb->args[1];
1581 arg.w.count = 0;
1582 q->ops->cl_ops->walk(q, &arg.w);
1583 cb->args[1] = arg.w.count;
1584 if (arg.w.stop)
1585 return -1;
1586 (*t_p)++;
1587 return 0;
1588}
1589
1590static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1591 struct tcmsg *tcm, struct netlink_callback *cb,
1592 int *t_p, int s_t)
1593{
1594 struct Qdisc *q;
1595
1596 if (!root)
1597 return 0;
1598
1599 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1600 return -1;
1601
1602 list_for_each_entry(q, &root->list, list) {
1603 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1604 return -1;
1605 }
1606
1607 return 0;
1608}
1609
1610static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1611{
1612 struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
1613 struct net *net = sock_net(skb->sk);
1614 struct netdev_queue *dev_queue;
1615 struct net_device *dev;
1616 int t, s_t;
1617
1618 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1619 return 0;
1620 dev = dev_get_by_index(net, tcm->tcm_ifindex);
1621 if (!dev)
1622 return 0;
1623
1624 s_t = cb->args[0];
1625 t = 0;
1626
1627 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1628 goto done;
1629
1630 dev_queue = dev_ingress_queue(dev);
1631 if (dev_queue &&
1632 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1633 &t, s_t) < 0)
1634 goto done;
1635
1636done:
1637 cb->args[0] = t;
1638
1639 dev_put(dev);
1640 return skb->len;
1641}
1642
1643/* Main classifier routine: scans classifier chain attached
1644 * to this qdisc, (optionally) tests for protocol and asks
1645 * specific classifiers.
1646 */
1647int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
1648 struct tcf_result *res)
1649{
1650 __be16 protocol = skb->protocol;
1651 int err;
1652
1653 for (; tp; tp = tp->next) {
1654 if (tp->protocol != protocol &&
1655 tp->protocol != htons(ETH_P_ALL))
1656 continue;
1657 err = tp->classify(skb, tp, res);
1658
1659 if (err >= 0) {
1660#ifdef CONFIG_NET_CLS_ACT
1661 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1662 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1663#endif
1664 return err;
1665 }
1666 }
1667 return -1;
1668}
1669EXPORT_SYMBOL(tc_classify_compat);
1670
1671int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1672 struct tcf_result *res)
1673{
1674 int err = 0;
1675#ifdef CONFIG_NET_CLS_ACT
1676 const struct tcf_proto *otp = tp;
1677reclassify:
1678#endif
1679
1680 err = tc_classify_compat(skb, tp, res);
1681#ifdef CONFIG_NET_CLS_ACT
1682 if (err == TC_ACT_RECLASSIFY) {
1683 u32 verd = G_TC_VERD(skb->tc_verd);
1684 tp = otp;
1685
1686 if (verd++ >= MAX_REC_LOOP) {
1687 if (net_ratelimit())
1688 pr_notice("%s: packet reclassify loop"
1689 " rule prio %u protocol %02x\n",
1690 tp->q->ops->id,
1691 tp->prio & 0xffff,
1692 ntohs(tp->protocol));
1693 return TC_ACT_SHOT;
1694 }
1695 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1696 goto reclassify;
1697 }
1698#endif
1699 return err;
1700}
1701EXPORT_SYMBOL(tc_classify);
1702
1703void tcf_destroy(struct tcf_proto *tp)
1704{
1705 tp->ops->destroy(tp);
1706 module_put(tp->ops->owner);
1707 kfree(tp);
1708}
1709
1710void tcf_destroy_chain(struct tcf_proto **fl)
1711{
1712 struct tcf_proto *tp;
1713
1714 while ((tp = *fl) != NULL) {
1715 *fl = tp->next;
1716 tcf_destroy(tp);
1717 }
1718}
1719EXPORT_SYMBOL(tcf_destroy_chain);
1720
1721#ifdef CONFIG_PROC_FS
1722static int psched_show(struct seq_file *seq, void *v)
1723{
1724 struct timespec ts;
1725
1726 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1727 seq_printf(seq, "%08x %08x %08x %08x\n",
1728 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1729 1000000,
1730 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1731
1732 return 0;
1733}
1734
1735static int psched_open(struct inode *inode, struct file *file)
1736{
1737 return single_open(file, psched_show, NULL);
1738}
1739
1740static const struct file_operations psched_fops = {
1741 .owner = THIS_MODULE,
1742 .open = psched_open,
1743 .read = seq_read,
1744 .llseek = seq_lseek,
1745 .release = single_release,
1746};
1747
1748static int __net_init psched_net_init(struct net *net)
1749{
1750 struct proc_dir_entry *e;
1751
1752 e = proc_net_fops_create(net, "psched", 0, &psched_fops);
1753 if (e == NULL)
1754 return -ENOMEM;
1755
1756 return 0;
1757}
1758
1759static void __net_exit psched_net_exit(struct net *net)
1760{
1761 proc_net_remove(net, "psched");
1762}
1763#else
1764static int __net_init psched_net_init(struct net *net)
1765{
1766 return 0;
1767}
1768
1769static void __net_exit psched_net_exit(struct net *net)
1770{
1771}
1772#endif
1773
1774static struct pernet_operations psched_net_ops = {
1775 .init = psched_net_init,
1776 .exit = psched_net_exit,
1777};
1778
1779static int __init pktsched_init(void)
1780{
1781 int err;
1782
1783 err = register_pernet_subsys(&psched_net_ops);
1784 if (err) {
1785 pr_err("pktsched_init: "
1786 "cannot initialize per netns operations\n");
1787 return err;
1788 }
1789
1790 register_qdisc(&pfifo_qdisc_ops);
1791 register_qdisc(&bfifo_qdisc_ops);
1792 register_qdisc(&pfifo_head_drop_qdisc_ops);
1793 register_qdisc(&mq_qdisc_ops);
1794
1795 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1796 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1797 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1798 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1799 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1800 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1801
1802 return 0;
1803}
1804
1805subsys_initcall(pktsched_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_api.c Packet scheduler API.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 *
7 * Fixes:
8 *
9 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
10 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
11 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/skbuff.h>
20#include <linux/init.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/kmod.h>
24#include <linux/list.h>
25#include <linux/hrtimer.h>
26#include <linux/slab.h>
27#include <linux/hashtable.h>
28
29#include <net/net_namespace.h>
30#include <net/sock.h>
31#include <net/netlink.h>
32#include <net/pkt_sched.h>
33#include <net/pkt_cls.h>
34#include <net/tc_wrapper.h>
35
36#include <trace/events/qdisc.h>
37
38/*
39
40 Short review.
41 -------------
42
43 This file consists of two interrelated parts:
44
45 1. queueing disciplines manager frontend.
46 2. traffic classes manager frontend.
47
48 Generally, a queueing discipline ("qdisc") is a black box,
49 which is able to enqueue packets and to dequeue them (when
50 the device is ready to send something) in an order and at times
51 determined by the algorithm hidden in it.
52
53 qdisc's are divided into two categories:
54 - "queues", which have no internal structure visible from outside.
55 - "schedulers", which split all the packets into "traffic classes",
56 using "packet classifiers" (look at cls_api.c)
57
58 In turn, classes may have child qdiscs (as a rule, queues)
59 attached to them etc. etc. etc.
60
61 The goal of the routines in this file is to translate
62 the information supplied by the user in the form of handles
63 into a form more intelligible to the kernel, to perform some sanity
64 checks and the part of the work that is common to all qdiscs,
65 and to provide rtnetlink notifications.
66
67 All real intelligent work is done inside qdisc modules.
68
69
70
71 Every discipline has two major routines: enqueue and dequeue.
72
73 ---dequeue
74
75 dequeue usually returns a skb to send. It is allowed to return NULL,
76 but that does not mean the queue is empty; it just means the
77 discipline does not want to send anything this time.
78 The queue is really empty if q->q.qlen == 0.
79 For complicated disciplines with multiple queues, q->q is not a
80 real packet queue, but q->q.qlen must nevertheless be valid.
81
82 ---enqueue
83
84 enqueue returns 0 if the packet was enqueued successfully.
85 If a packet (this one or another one) was dropped, it returns
86 a non-zero error code.
87 NET_XMIT_DROP - this packet was dropped.
88 Expected action: do not back off, but wait until the queue clears.
89 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
90 Expected action: back off or ignore.
91
92 Auxiliary routines:
93
94 ---peek
95
96 like dequeue but without removing a packet from the queue
97
98 ---reset
99
100 returns qdisc to initial state: purge all buffers, clear all
101 timers, counters (except for statistics) etc.
102
103 ---init
104
105 initializes newly created qdisc.
106
107 ---destroy
108
109 destroys resources allocated by init and during lifetime of qdisc.
110
111 ---change
112
113 changes qdisc parameters.
114 */
115
116/* Protects list of registered TC modules. It is pure SMP lock. */
117static DEFINE_RWLOCK(qdisc_mod_lock);
118
119
120/************************************************
121 * Queueing disciplines manipulation. *
122 ************************************************/
123
124
125/* The list of all installed queueing disciplines. */
126
127static struct Qdisc_ops *qdisc_base;
128
129/* Register/unregister queueing discipline */
130
131int register_qdisc(struct Qdisc_ops *qops)
132{
133 struct Qdisc_ops *q, **qp;
134 int rc = -EEXIST;
135
136 write_lock(&qdisc_mod_lock);
137 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 if (!strcmp(qops->id, q->id))
139 goto out;
140
141 if (qops->enqueue == NULL)
142 qops->enqueue = noop_qdisc_ops.enqueue;
143 if (qops->peek == NULL) {
144 if (qops->dequeue == NULL)
145 qops->peek = noop_qdisc_ops.peek;
146 else
147 goto out_einval;
148 }
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
151
152 if (qops->cl_ops) {
153 const struct Qdisc_class_ops *cops = qops->cl_ops;
154
155 if (!(cops->find && cops->walk && cops->leaf))
156 goto out_einval;
157
158 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
159 goto out_einval;
160 }
161
162 qops->next = NULL;
163 *qp = qops;
164 rc = 0;
165out:
166 write_unlock(&qdisc_mod_lock);
167 return rc;
168
169out_einval:
170 rc = -EINVAL;
171 goto out;
172}
173EXPORT_SYMBOL(register_qdisc);
174
175void unregister_qdisc(struct Qdisc_ops *qops)
176{
177 struct Qdisc_ops *q, **qp;
178 int err = -ENOENT;
179
180 write_lock(&qdisc_mod_lock);
181 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
182 if (q == qops)
183 break;
184 if (q) {
185 *qp = q->next;
186 q->next = NULL;
187 err = 0;
188 }
189 write_unlock(&qdisc_mod_lock);
190
191 WARN(err, "unregister qdisc(%s) failed\n", qops->id);
192}
193EXPORT_SYMBOL(unregister_qdisc);
194
195/* Get default qdisc if not otherwise specified */
196void qdisc_get_default(char *name, size_t len)
197{
198 read_lock(&qdisc_mod_lock);
199 strscpy(name, default_qdisc_ops->id, len);
200 read_unlock(&qdisc_mod_lock);
201}
202
203static struct Qdisc_ops *qdisc_lookup_default(const char *name)
204{
205 struct Qdisc_ops *q = NULL;
206
207 for (q = qdisc_base; q; q = q->next) {
208 if (!strcmp(name, q->id)) {
209 if (!try_module_get(q->owner))
210 q = NULL;
211 break;
212 }
213 }
214
215 return q;
216}
217
218/* Set new default qdisc to use */
219int qdisc_set_default(const char *name)
220{
221 const struct Qdisc_ops *ops;
222
223 if (!capable(CAP_NET_ADMIN))
224 return -EPERM;
225
226 write_lock(&qdisc_mod_lock);
227 ops = qdisc_lookup_default(name);
228 if (!ops) {
229 /* Not found, drop lock and try to load module */
230 write_unlock(&qdisc_mod_lock);
231 request_module("sch_%s", name);
232 write_lock(&qdisc_mod_lock);
233
234 ops = qdisc_lookup_default(name);
235 }
236
237 if (ops) {
238 /* Set new default */
239 module_put(default_qdisc_ops->owner);
240 default_qdisc_ops = ops;
241 }
242 write_unlock(&qdisc_mod_lock);
243
244 return ops ? 0 : -ENOENT;
245}
246
247#ifdef CONFIG_NET_SCH_DEFAULT
248/* Set default value from kernel config */
249static int __init sch_default_qdisc(void)
250{
251 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
252}
253late_initcall(sch_default_qdisc);
254#endif
255
256/* We know handle. Find qdisc among all qdisc's attached to device
257 * (root qdisc, all its children, children of children etc.)
258 * Note: caller either uses rtnl or rcu_read_lock()
259 */
260
261static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
262{
263 struct Qdisc *q;
264
265 if (!qdisc_dev(root))
266 return (root->handle == handle ? root : NULL);
267
268 if (!(root->flags & TCQ_F_BUILTIN) &&
269 root->handle == handle)
270 return root;
271
272 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 lockdep_rtnl_is_held()) {
274 if (q->handle == handle)
275 return q;
276 }
277 return NULL;
278}
279
280void qdisc_hash_add(struct Qdisc *q, bool invisible)
281{
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 ASSERT_RTNL();
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 if (invisible)
286 q->flags |= TCQ_F_INVISIBLE;
287 }
288}
289EXPORT_SYMBOL(qdisc_hash_add);
290
291void qdisc_hash_del(struct Qdisc *q)
292{
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 ASSERT_RTNL();
295 hash_del_rcu(&q->hash);
296 }
297}
298EXPORT_SYMBOL(qdisc_hash_del);
299
300struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301{
302 struct Qdisc *q;
303
304 if (!handle)
305 return NULL;
306 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
307 if (q)
308 goto out;
309
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 dev_ingress_queue(dev)->qdisc_sleeping,
313 handle);
314out:
315 return q;
316}
317
318struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
319{
320 struct netdev_queue *nq;
321 struct Qdisc *q;
322
323 if (!handle)
324 return NULL;
325 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
326 if (q)
327 goto out;
328
329 nq = dev_ingress_queue_rcu(dev);
330 if (nq)
331 q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
332out:
333 return q;
334}
335
336static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
337{
338 unsigned long cl;
339 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
340
341 if (cops == NULL)
342 return NULL;
343 cl = cops->find(p, classid);
344
345 if (cl == 0)
346 return NULL;
347 return cops->leaf(p, cl);
348}
349
350/* Find queueing discipline by name */
351
352static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
353{
354 struct Qdisc_ops *q = NULL;
355
356 if (kind) {
357 read_lock(&qdisc_mod_lock);
358 for (q = qdisc_base; q; q = q->next) {
359 if (nla_strcmp(kind, q->id) == 0) {
360 if (!try_module_get(q->owner))
361 q = NULL;
362 break;
363 }
364 }
365 read_unlock(&qdisc_mod_lock);
366 }
367 return q;
368}
369
370/* The linklayer setting was not transferred from iproute2 in older
371 * versions, and the rate table lookup system has been dropped from
372 * the kernel. To stay backward compatible with older iproute2 tc
373 * utils, we detect the linklayer setting by checking whether the rate
374 * table was modified.
375 *
376 * For linklayer ATM table entries, the rate table will be aligned to
377 * 48 bytes, thus some table entries will contain the same value. The
378 * mpu (min packet unit) is also encoded into the old rate table, thus
379 * starting from the mpu, we find the low and high table entries for
380 * mapping this cell. If these entries contain the same value, then
381 * the rate table has been modified for linklayer ATM.
382 *
383 * This is done by rounding mpu up to the nearest 48 byte cell/entry,
384 * then rounding up to the next cell, calculating the table entry one
385 * below, and comparing.
386 */
387static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
388{
389 int low = roundup(r->mpu, 48);
390 int high = roundup(low+1, 48);
391 int cell_low = low >> r->cell_log;
392 int cell_high = (high >> r->cell_log) - 1;
393
394 /* rtab is too inaccurate at rates > 100Mbit/s */
395 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
396 pr_debug("TC linklayer: Giving up ATM detection\n");
397 return TC_LINKLAYER_ETHERNET;
398 }
399
400 if ((cell_high > cell_low) && (cell_high < 256)
401 && (rtab[cell_low] == rtab[cell_high])) {
402 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
403 cell_low, cell_high, rtab[cell_high]);
404 return TC_LINKLAYER_ATM;
405 }
406 return TC_LINKLAYER_ETHERNET;
407}
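
/* Worked example (illustrative numbers, not tied to a specific iproute2
 * build): with mpu = 0 and cell_log = 3, low = 0 and high = 48, so
 * cell_low = 0 and cell_high = 5. Those entries describe roughly 8-byte
 * and 48-byte packets; both fit in a single 48-byte ATM cell, so an
 * ATM-built table has rtab[0] == rtab[5] and TC_LINKLAYER_ATM is
 * returned, while an Ethernet-built table stores different transmission
 * times for the two sizes.
 */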
408
409static struct qdisc_rate_table *qdisc_rtab_list;
410
411struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
412 struct nlattr *tab,
413 struct netlink_ext_ack *extack)
414{
415 struct qdisc_rate_table *rtab;
416
417 if (tab == NULL || r->rate == 0 ||
418 r->cell_log == 0 || r->cell_log >= 32 ||
419 nla_len(tab) != TC_RTAB_SIZE) {
420 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
421 return NULL;
422 }
423
424 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
425 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
426 !memcmp(&rtab->data, nla_data(tab), 1024)) {
427 rtab->refcnt++;
428 return rtab;
429 }
430 }
431
432 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
433 if (rtab) {
434 rtab->rate = *r;
435 rtab->refcnt = 1;
436 memcpy(rtab->data, nla_data(tab), 1024);
437 if (r->linklayer == TC_LINKLAYER_UNAWARE)
438 r->linklayer = __detect_linklayer(r, rtab->data);
439 rtab->next = qdisc_rtab_list;
440 qdisc_rtab_list = rtab;
441 } else {
442 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
443 }
444 return rtab;
445}
446EXPORT_SYMBOL(qdisc_get_rtab);
447
448void qdisc_put_rtab(struct qdisc_rate_table *tab)
449{
450 struct qdisc_rate_table *rtab, **rtabp;
451
452 if (!tab || --tab->refcnt)
453 return;
454
455 for (rtabp = &qdisc_rtab_list;
456 (rtab = *rtabp) != NULL;
457 rtabp = &rtab->next) {
458 if (rtab == tab) {
459 *rtabp = rtab->next;
460 kfree(rtab);
461 return;
462 }
463 }
464}
465EXPORT_SYMBOL(qdisc_put_rtab);
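
/* Illustrative sketch only (not used by this file): a shaping qdisc would
 * typically pair qdisc_get_rtab()/qdisc_put_rtab() as below while parsing
 * its options, so identical tables are shared and reference counted.
 * The "example_" name is hypothetical.
 */
static int example_swap_rtab(struct tc_ratespec *r, struct nlattr *rtab_attr,
			     struct qdisc_rate_table **slot,
			     struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(r, rtab_attr, extack);	/* takes a reference */
	if (!rtab)
		return -EINVAL;

	qdisc_put_rtab(*slot);		/* release the previously held table */
	*slot = rtab;
	return 0;
}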
466
467static LIST_HEAD(qdisc_stab_list);
468
469static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
470 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
471 [TCA_STAB_DATA] = { .type = NLA_BINARY },
472};
473
474static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
475 struct netlink_ext_ack *extack)
476{
477 struct nlattr *tb[TCA_STAB_MAX + 1];
478 struct qdisc_size_table *stab;
479 struct tc_sizespec *s;
480 unsigned int tsize = 0;
481 u16 *tab = NULL;
482 int err;
483
484 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
485 extack);
486 if (err < 0)
487 return ERR_PTR(err);
488 if (!tb[TCA_STAB_BASE]) {
489 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
490 return ERR_PTR(-EINVAL);
491 }
492
493 s = nla_data(tb[TCA_STAB_BASE]);
494
495 if (s->tsize > 0) {
496 if (!tb[TCA_STAB_DATA]) {
497 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
498 return ERR_PTR(-EINVAL);
499 }
500 tab = nla_data(tb[TCA_STAB_DATA]);
501 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
502 }
503
504 if (tsize != s->tsize || (!tab && tsize > 0)) {
505 NL_SET_ERR_MSG(extack, "Invalid size of size table");
506 return ERR_PTR(-EINVAL);
507 }
508
509 list_for_each_entry(stab, &qdisc_stab_list, list) {
510 if (memcmp(&stab->szopts, s, sizeof(*s)))
511 continue;
512 if (tsize > 0 &&
513 memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
514 continue;
515 stab->refcnt++;
516 return stab;
517 }
518
519 if (s->size_log > STAB_SIZE_LOG_MAX ||
520 s->cell_log > STAB_SIZE_LOG_MAX) {
521 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
522 return ERR_PTR(-EINVAL);
523 }
524
525 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
526 if (!stab)
527 return ERR_PTR(-ENOMEM);
528
529 stab->refcnt = 1;
530 stab->szopts = *s;
531 if (tsize > 0)
532 memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
533
534 list_add_tail(&stab->list, &qdisc_stab_list);
535
536 return stab;
537}
538
539void qdisc_put_stab(struct qdisc_size_table *tab)
540{
541 if (!tab)
542 return;
543
544 if (--tab->refcnt == 0) {
545 list_del(&tab->list);
546 kfree_rcu(tab, rcu);
547 }
548}
549EXPORT_SYMBOL(qdisc_put_stab);
550
551static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
552{
553 struct nlattr *nest;
554
555 nest = nla_nest_start_noflag(skb, TCA_STAB);
556 if (nest == NULL)
557 goto nla_put_failure;
558 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
559 goto nla_put_failure;
560 nla_nest_end(skb, nest);
561
562 return skb->len;
563
564nla_put_failure:
565 return -1;
566}
567
568void __qdisc_calculate_pkt_len(struct sk_buff *skb,
569 const struct qdisc_size_table *stab)
570{
571 int pkt_len, slot;
572
573 pkt_len = skb->len + stab->szopts.overhead;
574 if (unlikely(!stab->szopts.tsize))
575 goto out;
576
577 slot = pkt_len + stab->szopts.cell_align;
578 if (unlikely(slot < 0))
579 slot = 0;
580
581 slot >>= stab->szopts.cell_log;
582 if (likely(slot < stab->szopts.tsize))
583 pkt_len = stab->data[slot];
584 else
585 pkt_len = stab->data[stab->szopts.tsize - 1] *
586 (slot / stab->szopts.tsize) +
587 stab->data[slot % stab->szopts.tsize];
588
589 pkt_len <<= stab->szopts.size_log;
590out:
591 if (unlikely(pkt_len < 1))
592 pkt_len = 1;
593 qdisc_skb_cb(skb)->pkt_len = pkt_len;
594}
595EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
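
/* Worked example (illustrative numbers): with szopts.overhead = 24,
 * cell_align = 0, cell_log = 6, size_log = 6 and data[] = { 1, 2, 3, ... },
 * a 100-byte skb gives pkt_len = 124, slot = 124 >> 6 = 1, and finally
 * pkt_len = data[1] << 6 = 128, i.e. the packet is accounted as two full
 * 64-byte cells rather than its wire length.
 */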
596
597void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
598{
599 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
600 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
601 txt, qdisc->ops->id, qdisc->handle >> 16);
602 qdisc->flags |= TCQ_F_WARN_NONWC;
603 }
604}
605EXPORT_SYMBOL(qdisc_warn_nonwc);
606
607static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
608{
609 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
610 timer);
611
612 rcu_read_lock();
613 __netif_schedule(qdisc_root(wd->qdisc));
614 rcu_read_unlock();
615
616 return HRTIMER_NORESTART;
617}
618
619void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
620 clockid_t clockid)
621{
622 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
623 wd->timer.function = qdisc_watchdog;
624 wd->qdisc = qdisc;
625}
626EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
627
628void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
629{
630 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
631}
632EXPORT_SYMBOL(qdisc_watchdog_init);
633
634void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
635 u64 delta_ns)
636{
637 if (test_bit(__QDISC_STATE_DEACTIVATED,
638 &qdisc_root_sleeping(wd->qdisc)->state))
639 return;
640
641 if (hrtimer_is_queued(&wd->timer)) {
642 /* If timer is already set in [expires, expires + delta_ns],
643 * do not reprogram it.
644 */
645 if (wd->last_expires - expires <= delta_ns)
646 return;
647 }
648
649 wd->last_expires = expires;
650 hrtimer_start_range_ns(&wd->timer,
651 ns_to_ktime(expires),
652 delta_ns,
653 HRTIMER_MODE_ABS_PINNED);
654}
655EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
656
657void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
658{
659 hrtimer_cancel(&wd->timer);
660}
661EXPORT_SYMBOL(qdisc_watchdog_cancel);
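
/* Illustrative sketch only (not part of this file): a minimal shaper-style
 * qdisc would typically call qdisc_watchdog_init() from its ->init(), rearm
 * the timer from ->dequeue() when nothing may be sent yet, and cancel it on
 * ->destroy(). The "example_" names and the next_send_time field are
 * hypothetical.
 */
struct example_sched_data {
	struct qdisc_watchdog watchdog;
	u64 next_send_time;	/* ns timestamp when the gate opens again */
};

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	if (ktime_get_ns() < q->next_send_time) {
		/* Too early: ask to be rescheduled when the gate opens. */
		qdisc_watchdog_schedule_range_ns(&q->watchdog,
						 q->next_send_time, 0);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}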
662
663static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
664{
665 struct hlist_head *h;
666 unsigned int i;
667
668 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
669
670 if (h != NULL) {
671 for (i = 0; i < n; i++)
672 INIT_HLIST_HEAD(&h[i]);
673 }
674 return h;
675}
676
677void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
678{
679 struct Qdisc_class_common *cl;
680 struct hlist_node *next;
681 struct hlist_head *nhash, *ohash;
682 unsigned int nsize, nmask, osize;
683 unsigned int i, h;
684
685 /* Rehash when load factor exceeds 0.75 */
686 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
687 return;
688 nsize = clhash->hashsize * 2;
689 nmask = nsize - 1;
690 nhash = qdisc_class_hash_alloc(nsize);
691 if (nhash == NULL)
692 return;
693
694 ohash = clhash->hash;
695 osize = clhash->hashsize;
696
697 sch_tree_lock(sch);
698 for (i = 0; i < osize; i++) {
699 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
700 h = qdisc_class_hash(cl->classid, nmask);
701 hlist_add_head(&cl->hnode, &nhash[h]);
702 }
703 }
704 clhash->hash = nhash;
705 clhash->hashsize = nsize;
706 clhash->hashmask = nmask;
707 sch_tree_unlock(sch);
708
709 kvfree(ohash);
710}
711EXPORT_SYMBOL(qdisc_class_hash_grow);
712
713int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
714{
715 unsigned int size = 4;
716
717 clhash->hash = qdisc_class_hash_alloc(size);
718 if (!clhash->hash)
719 return -ENOMEM;
720 clhash->hashsize = size;
721 clhash->hashmask = size - 1;
722 clhash->hashelems = 0;
723 return 0;
724}
725EXPORT_SYMBOL(qdisc_class_hash_init);
726
727void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
728{
729 kvfree(clhash->hash);
730}
731EXPORT_SYMBOL(qdisc_class_hash_destroy);
732
733void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
734 struct Qdisc_class_common *cl)
735{
736 unsigned int h;
737
738 INIT_HLIST_NODE(&cl->hnode);
739 h = qdisc_class_hash(cl->classid, clhash->hashmask);
740 hlist_add_head(&cl->hnode, &clhash->hash[h]);
741 clhash->hashelems++;
742}
743EXPORT_SYMBOL(qdisc_class_hash_insert);
744
745void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
746 struct Qdisc_class_common *cl)
747{
748 hlist_del(&cl->hnode);
749 clhash->hashelems--;
750}
751EXPORT_SYMBOL(qdisc_class_hash_remove);
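
/* Illustrative sketch only (not used by this file): a classful qdisc embeds
 * a struct Qdisc_class_common in each class and drives the hash helpers
 * from its class creation path, in the same spirit as HTB-style qdiscs.
 * The "example_" names are hypothetical.
 */
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* per-class scheduling state would follow here */
};

static void example_attach_class(struct Qdisc *sch,
				 struct Qdisc_class_hash *clhash,
				 struct example_class *cl, u32 classid)
{
	cl->common.classid = classid;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(clhash, &cl->common);
	sch_tree_unlock(sch);

	/* Outside the tree lock: may rehash, taking the lock itself. */
	qdisc_class_hash_grow(sch, clhash);
}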
752
753/* Allocate a unique handle from the space managed by the kernel.
754 * The possible range is [8000-FFFF]:0000 (0x8000 values).
755 */
756static u32 qdisc_alloc_handle(struct net_device *dev)
757{
758 int i = 0x8000;
759 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
760
761 do {
762 autohandle += TC_H_MAKE(0x10000U, 0);
763 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
764 autohandle = TC_H_MAKE(0x80000000U, 0);
765 if (!qdisc_lookup(dev, autohandle))
766 return autohandle;
767 cond_resched();
768 } while (--i > 0);
769
770 return 0;
771}
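
/* Example (illustrative): the first automatically allocated handles show up
 * as 8001:, 8002:, ... in "tc qdisc show"; after 0x8000 unsuccessful probes
 * the allocator gives up and returns 0.
 */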
772
773void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
774{
775 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
776 const struct Qdisc_class_ops *cops;
777 unsigned long cl;
778 u32 parentid;
779 bool notify;
780 int drops;
781
782 if (n == 0 && len == 0)
783 return;
784 drops = max_t(int, n, 0);
785 rcu_read_lock();
786 while ((parentid = sch->parent)) {
787 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
788 break;
789
790 if (sch->flags & TCQ_F_NOPARENT)
791 break;
792		/* Notify the parent qdisc only if the child qdisc becomes empty.
793		 *
794		 * If the child was empty even before the update then the backlog
795		 * counter is inconsistent and we skip the notification because
796		 * the parent class is already passive.
797		 *
798		 * If the original child was offloaded then it is allowed
799		 * to be seen as empty, so the parent is notified anyway.
800		 */
801 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
802 !qdisc_is_offloaded);
803 /* TODO: perform the search on a per txq basis */
804 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
805 if (sch == NULL) {
806 WARN_ON_ONCE(parentid != TC_H_ROOT);
807 break;
808 }
809 cops = sch->ops->cl_ops;
810 if (notify && cops->qlen_notify) {
811 cl = cops->find(sch, parentid);
812 cops->qlen_notify(sch, cl);
813 }
814 sch->q.qlen -= n;
815 sch->qstats.backlog -= len;
816 __qdisc_qstats_drop(sch, drops);
817 }
818 rcu_read_unlock();
819}
820EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
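
/* Illustrative sketch only (not used by this file): when a classful qdisc
 * purges one of its children it reads the child's counters before resetting
 * it and then propagates the decrease up the tree, which is essentially what
 * the qdisc_purge_queue()/qdisc_tree_flush_backlog() helpers do. The
 * "example_" name is hypothetical.
 */
static void example_purge_child(struct Qdisc *child)
{
	unsigned int qlen = child->q.qlen;
	unsigned int backlog = child->qstats.backlog;

	qdisc_reset(child);			/* drops all queued packets */
	qdisc_tree_reduce_backlog(child, qlen, backlog);
}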
821
822int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
823 void *type_data)
824{
825 struct net_device *dev = qdisc_dev(sch);
826 int err;
827
828 sch->flags &= ~TCQ_F_OFFLOADED;
829 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
830 return 0;
831
832 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
833 if (err == -EOPNOTSUPP)
834 return 0;
835
836 if (!err)
837 sch->flags |= TCQ_F_OFFLOADED;
838
839 return err;
840}
841EXPORT_SYMBOL(qdisc_offload_dump_helper);
842
843void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
844 struct Qdisc *new, struct Qdisc *old,
845 enum tc_setup_type type, void *type_data,
846 struct netlink_ext_ack *extack)
847{
848 bool any_qdisc_is_offloaded;
849 int err;
850
851 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
852 return;
853
854 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
855
856	/* Don't report an error if the graft is part of a destroy operation. */
857 if (!err || !new || new == &noop_qdisc)
858 return;
859
860	/* Don't report an error if none of the parent, the old child and
861	 * the new one are offloaded.
862	 */
863 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
864 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
865 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
866
867 if (any_qdisc_is_offloaded)
868 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
869}
870EXPORT_SYMBOL(qdisc_offload_graft_helper);
871
872void qdisc_offload_query_caps(struct net_device *dev,
873 enum tc_setup_type type,
874 void *caps, size_t caps_len)
875{
876 const struct net_device_ops *ops = dev->netdev_ops;
877 struct tc_query_caps_base base = {
878 .type = type,
879 .caps = caps,
880 };
881
882 memset(caps, 0, caps_len);
883
884 if (ops->ndo_setup_tc)
885 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
886}
887EXPORT_SYMBOL(qdisc_offload_query_caps);
888
889static void qdisc_offload_graft_root(struct net_device *dev,
890 struct Qdisc *new, struct Qdisc *old,
891 struct netlink_ext_ack *extack)
892{
893 struct tc_root_qopt_offload graft_offload = {
894 .command = TC_ROOT_GRAFT,
895 .handle = new ? new->handle : 0,
896 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
897 (old && old->flags & TCQ_F_INGRESS),
898 };
899
900 qdisc_offload_graft_helper(dev, NULL, new, old,
901 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
902}
903
904static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
905 u32 portid, u32 seq, u16 flags, int event)
906{
907 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
908 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
909 struct tcmsg *tcm;
910 struct nlmsghdr *nlh;
911 unsigned char *b = skb_tail_pointer(skb);
912 struct gnet_dump d;
913 struct qdisc_size_table *stab;
914 u32 block_index;
915 __u32 qlen;
916
917 cond_resched();
918 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
919 if (!nlh)
920 goto out_nlmsg_trim;
921 tcm = nlmsg_data(nlh);
922 tcm->tcm_family = AF_UNSPEC;
923 tcm->tcm__pad1 = 0;
924 tcm->tcm__pad2 = 0;
925 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
926 tcm->tcm_parent = clid;
927 tcm->tcm_handle = q->handle;
928 tcm->tcm_info = refcount_read(&q->refcnt);
929 if (nla_put_string(skb, TCA_KIND, q->ops->id))
930 goto nla_put_failure;
931 if (q->ops->ingress_block_get) {
932 block_index = q->ops->ingress_block_get(q);
933 if (block_index &&
934 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
935 goto nla_put_failure;
936 }
937 if (q->ops->egress_block_get) {
938 block_index = q->ops->egress_block_get(q);
939 if (block_index &&
940 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
941 goto nla_put_failure;
942 }
943 if (q->ops->dump && q->ops->dump(q, skb) < 0)
944 goto nla_put_failure;
945 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
946 goto nla_put_failure;
947 qlen = qdisc_qlen_sum(q);
948
949 stab = rtnl_dereference(q->stab);
950 if (stab && qdisc_dump_stab(skb, stab) < 0)
951 goto nla_put_failure;
952
953 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
954 NULL, &d, TCA_PAD) < 0)
955 goto nla_put_failure;
956
957 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
958 goto nla_put_failure;
959
960 if (qdisc_is_percpu_stats(q)) {
961 cpu_bstats = q->cpu_bstats;
962 cpu_qstats = q->cpu_qstats;
963 }
964
965 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
966 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
967 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
968 goto nla_put_failure;
969
970 if (gnet_stats_finish_copy(&d) < 0)
971 goto nla_put_failure;
972
973 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
974 return skb->len;
975
976out_nlmsg_trim:
977nla_put_failure:
978 nlmsg_trim(skb, b);
979 return -1;
980}
981
982static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
983{
984 if (q->flags & TCQ_F_BUILTIN)
985 return true;
986 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
987 return true;
988
989 return false;
990}
991
992static int qdisc_notify(struct net *net, struct sk_buff *oskb,
993 struct nlmsghdr *n, u32 clid,
994 struct Qdisc *old, struct Qdisc *new)
995{
996 struct sk_buff *skb;
997 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
998
999 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1000 if (!skb)
1001 return -ENOBUFS;
1002
1003 if (old && !tc_qdisc_dump_ignore(old, false)) {
1004 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1005 0, RTM_DELQDISC) < 0)
1006 goto err_out;
1007 }
1008 if (new && !tc_qdisc_dump_ignore(new, false)) {
1009 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1010 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1011 goto err_out;
1012 }
1013
1014 if (skb->len)
1015 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1016 n->nlmsg_flags & NLM_F_ECHO);
1017
1018err_out:
1019 kfree_skb(skb);
1020 return -EINVAL;
1021}
1022
1023static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1024 struct nlmsghdr *n, u32 clid,
1025 struct Qdisc *old, struct Qdisc *new)
1026{
1027 if (new || old)
1028 qdisc_notify(net, skb, n, clid, old, new);
1029
1030 if (old)
1031 qdisc_put(old);
1032}
1033
1034static void qdisc_clear_nolock(struct Qdisc *sch)
1035{
1036 sch->flags &= ~TCQ_F_NOLOCK;
1037 if (!(sch->flags & TCQ_F_CPUSTATS))
1038 return;
1039
1040 free_percpu(sch->cpu_bstats);
1041 free_percpu(sch->cpu_qstats);
1042 sch->cpu_bstats = NULL;
1043 sch->cpu_qstats = NULL;
1044 sch->flags &= ~TCQ_F_CPUSTATS;
1045}
1046
1047/* Graft qdisc "new" to class "classid" of qdisc "parent" or
1048 * to device "dev".
1049 *
1050 * When appropriate, send a netlink notification using "skb"
1051 * and "n".
1052 *
1053 * On success, destroy the old qdisc.
1054 */
1055
1056static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1057 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1058 struct Qdisc *new, struct Qdisc *old,
1059 struct netlink_ext_ack *extack)
1060{
1061 struct Qdisc *q = old;
1062 struct net *net = dev_net(dev);
1063
1064 if (parent == NULL) {
1065 unsigned int i, num_q, ingress;
1066
1067 ingress = 0;
1068 num_q = dev->num_tx_queues;
1069 if ((q && q->flags & TCQ_F_INGRESS) ||
1070 (new && new->flags & TCQ_F_INGRESS)) {
1071 num_q = 1;
1072 ingress = 1;
1073 if (!dev_ingress_queue(dev)) {
1074 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1075 return -ENOENT;
1076 }
1077 }
1078
1079 if (dev->flags & IFF_UP)
1080 dev_deactivate(dev);
1081
1082 qdisc_offload_graft_root(dev, new, old, extack);
1083
1084 if (new && new->ops->attach && !ingress)
1085 goto skip;
1086
1087 for (i = 0; i < num_q; i++) {
1088 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
1089
1090 if (!ingress)
1091 dev_queue = netdev_get_tx_queue(dev, i);
1092
1093 old = dev_graft_qdisc(dev_queue, new);
1094 if (new && i > 0)
1095 qdisc_refcount_inc(new);
1096
1097 if (!ingress)
1098 qdisc_put(old);
1099 }
1100
1101skip:
1102 if (!ingress) {
1103 old = rtnl_dereference(dev->qdisc);
1104 if (new && !new->ops->attach)
1105 qdisc_refcount_inc(new);
1106 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1107
1108 notify_and_destroy(net, skb, n, classid, old, new);
1109
1110 if (new && new->ops->attach)
1111 new->ops->attach(new);
1112 } else {
1113 notify_and_destroy(net, skb, n, classid, old, new);
1114 }
1115
1116 if (dev->flags & IFF_UP)
1117 dev_activate(dev);
1118 } else {
1119 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1120 unsigned long cl;
1121 int err;
1122
1123		/* Only support running the class lockless if the parent is lockless */
1124 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1125 qdisc_clear_nolock(new);
1126
1127 if (!cops || !cops->graft)
1128 return -EOPNOTSUPP;
1129
1130 cl = cops->find(parent, classid);
1131 if (!cl) {
1132 NL_SET_ERR_MSG(extack, "Specified class not found");
1133 return -ENOENT;
1134 }
1135
1136 if (new && new->ops == &noqueue_qdisc_ops) {
1137 NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1138 return -EINVAL;
1139 }
1140
1141 err = cops->graft(parent, cl, new, &old, extack);
1142 if (err)
1143 return err;
1144 notify_and_destroy(net, skb, n, classid, old, new);
1145 }
1146 return 0;
1147}
1148
1149static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1150 struct netlink_ext_ack *extack)
1151{
1152 u32 block_index;
1153
1154 if (tca[TCA_INGRESS_BLOCK]) {
1155 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1156
1157 if (!block_index) {
1158 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1159 return -EINVAL;
1160 }
1161 if (!sch->ops->ingress_block_set) {
1162 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1163 return -EOPNOTSUPP;
1164 }
1165 sch->ops->ingress_block_set(sch, block_index);
1166 }
1167 if (tca[TCA_EGRESS_BLOCK]) {
1168 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1169
1170 if (!block_index) {
1171 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1172 return -EINVAL;
1173 }
1174 if (!sch->ops->egress_block_set) {
1175 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1176 return -EOPNOTSUPP;
1177 }
1178 sch->ops->egress_block_set(sch, block_index);
1179 }
1180 return 0;
1181}
1182
1183/*
1184   Allocate and initialize a new qdisc.
1185
1186   Parameters are passed via the netlink attributes in tca.
1187 */
1188
1189static struct Qdisc *qdisc_create(struct net_device *dev,
1190 struct netdev_queue *dev_queue,
1191 u32 parent, u32 handle,
1192 struct nlattr **tca, int *errp,
1193 struct netlink_ext_ack *extack)
1194{
1195 int err;
1196 struct nlattr *kind = tca[TCA_KIND];
1197 struct Qdisc *sch;
1198 struct Qdisc_ops *ops;
1199 struct qdisc_size_table *stab;
1200
1201 ops = qdisc_lookup_ops(kind);
1202#ifdef CONFIG_MODULES
1203 if (ops == NULL && kind != NULL) {
1204 char name[IFNAMSIZ];
1205 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1206			/* We dropped the RTNL semaphore in order to
1207			 * perform the module load. So, even if we
1208			 * succeeded in loading the module, we have to
1209			 * tell the caller to replay the request. We
1210			 * indicate this using -EAGAIN.
1211			 * We replay the request because the device may
1212			 * go away in the meantime.
1213			 */
1214 rtnl_unlock();
1215 request_module("sch_%s", name);
1216 rtnl_lock();
1217 ops = qdisc_lookup_ops(kind);
1218 if (ops != NULL) {
1219				/* We will call qdisc_lookup_ops() again on
1220				 * replay, so don't keep a reference here.
1221				 */
1222 module_put(ops->owner);
1223 err = -EAGAIN;
1224 goto err_out;
1225 }
1226 }
1227 }
1228#endif
1229
1230 err = -ENOENT;
1231 if (!ops) {
1232 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1233 goto err_out;
1234 }
1235
1236 sch = qdisc_alloc(dev_queue, ops, extack);
1237 if (IS_ERR(sch)) {
1238 err = PTR_ERR(sch);
1239 goto err_out2;
1240 }
1241
1242 sch->parent = parent;
1243
1244 if (handle == TC_H_INGRESS) {
1245 sch->flags |= TCQ_F_INGRESS;
1246 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1247 } else {
1248 if (handle == 0) {
1249 handle = qdisc_alloc_handle(dev);
1250 if (handle == 0) {
1251 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1252 err = -ENOSPC;
1253 goto err_out3;
1254 }
1255 }
1256 if (!netif_is_multiqueue(dev))
1257 sch->flags |= TCQ_F_ONETXQUEUE;
1258 }
1259
1260 sch->handle = handle;
1261
1262	/* This exists to stay backward compatible with a userspace
1263	 * loophole that allowed userspace to get the IFF_NO_QUEUE
1264	 * facility on older kernels by setting tx_queue_len=0 (prior
1265	 * to qdisc init) and then forgetting to reinit tx_queue_len
1266	 * before attaching a qdisc again.
1267	 */
1268 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1269 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1270 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1271 }
1272
1273 err = qdisc_block_indexes_set(sch, tca, extack);
1274 if (err)
1275 goto err_out3;
1276
1277 if (ops->init) {
1278 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1279 if (err != 0)
1280 goto err_out5;
1281 }
1282
1283 if (tca[TCA_STAB]) {
1284 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1285 if (IS_ERR(stab)) {
1286 err = PTR_ERR(stab);
1287 goto err_out4;
1288 }
1289 rcu_assign_pointer(sch->stab, stab);
1290 }
1291 if (tca[TCA_RATE]) {
1292 err = -EOPNOTSUPP;
1293 if (sch->flags & TCQ_F_MQROOT) {
1294 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1295 goto err_out4;
1296 }
1297
1298 err = gen_new_estimator(&sch->bstats,
1299 sch->cpu_bstats,
1300 &sch->rate_est,
1301 NULL,
1302 true,
1303 tca[TCA_RATE]);
1304 if (err) {
1305 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1306 goto err_out4;
1307 }
1308 }
1309
1310 qdisc_hash_add(sch, false);
1311 trace_qdisc_create(ops, dev, parent);
1312
1313 return sch;
1314
1315err_out5:
1316 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1317 if (ops->destroy)
1318 ops->destroy(sch);
1319err_out3:
1320 netdev_put(dev, &sch->dev_tracker);
1321 qdisc_free(sch);
1322err_out2:
1323 module_put(ops->owner);
1324err_out:
1325 *errp = err;
1326 return NULL;
1327
1328err_out4:
1329 /*
1330	 * Any broken qdiscs that would require an ops->reset() here?
1331	 * The qdisc was never in action, so it shouldn't be necessary.
1332 */
1333 qdisc_put_stab(rtnl_dereference(sch->stab));
1334 if (ops->destroy)
1335 ops->destroy(sch);
1336 goto err_out3;
1337}
1338
1339static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1340 struct netlink_ext_ack *extack)
1341{
1342 struct qdisc_size_table *ostab, *stab = NULL;
1343 int err = 0;
1344
1345 if (tca[TCA_OPTIONS]) {
1346 if (!sch->ops->change) {
1347 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1348 return -EINVAL;
1349 }
1350 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1351 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1352 return -EOPNOTSUPP;
1353 }
1354 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1355 if (err)
1356 return err;
1357 }
1358
1359 if (tca[TCA_STAB]) {
1360 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1361 if (IS_ERR(stab))
1362 return PTR_ERR(stab);
1363 }
1364
1365 ostab = rtnl_dereference(sch->stab);
1366 rcu_assign_pointer(sch->stab, stab);
1367 qdisc_put_stab(ostab);
1368
1369 if (tca[TCA_RATE]) {
1370		/* NB: ignores errors from gen_replace_estimator()
1371		 * because the change can't be undone. */
1372 if (sch->flags & TCQ_F_MQROOT)
1373 goto out;
1374 gen_replace_estimator(&sch->bstats,
1375 sch->cpu_bstats,
1376 &sch->rate_est,
1377 NULL,
1378 true,
1379 tca[TCA_RATE]);
1380 }
1381out:
1382 return 0;
1383}
1384
1385struct check_loop_arg {
1386 struct qdisc_walker w;
1387 struct Qdisc *p;
1388 int depth;
1389};
1390
1391static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1392 struct qdisc_walker *w);
1393
1394static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1395{
1396 struct check_loop_arg arg;
1397
1398 if (q->ops->cl_ops == NULL)
1399 return 0;
1400
1401 arg.w.stop = arg.w.skip = arg.w.count = 0;
1402 arg.w.fn = check_loop_fn;
1403 arg.depth = depth;
1404 arg.p = p;
1405 q->ops->cl_ops->walk(q, &arg.w);
1406 return arg.w.stop ? -ELOOP : 0;
1407}
1408
1409static int
1410check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1411{
1412 struct Qdisc *leaf;
1413 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1414 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1415
1416 leaf = cops->leaf(q, cl);
1417 if (leaf) {
1418 if (leaf == arg->p || arg->depth > 7)
1419 return -ELOOP;
1420 return check_loop(leaf, arg->p, arg->depth + 1);
1421 }
1422 return 0;
1423}
1424
1425const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1426 [TCA_KIND] = { .type = NLA_STRING },
1427 [TCA_RATE] = { .type = NLA_BINARY,
1428 .len = sizeof(struct tc_estimator) },
1429 [TCA_STAB] = { .type = NLA_NESTED },
1430 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1431 [TCA_CHAIN] = { .type = NLA_U32 },
1432 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1433 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1434};
1435
1436/*
1437 * Delete/get qdisc.
1438 */
1439
1440static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1441 struct netlink_ext_ack *extack)
1442{
1443 struct net *net = sock_net(skb->sk);
1444 struct tcmsg *tcm = nlmsg_data(n);
1445 struct nlattr *tca[TCA_MAX + 1];
1446 struct net_device *dev;
1447 u32 clid;
1448 struct Qdisc *q = NULL;
1449 struct Qdisc *p = NULL;
1450 int err;
1451
1452 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1453 rtm_tca_policy, extack);
1454 if (err < 0)
1455 return err;
1456
1457 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1458 if (!dev)
1459 return -ENODEV;
1460
1461 clid = tcm->tcm_parent;
1462 if (clid) {
1463 if (clid != TC_H_ROOT) {
1464 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1465 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1466 if (!p) {
1467 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1468 return -ENOENT;
1469 }
1470 q = qdisc_leaf(p, clid);
1471 } else if (dev_ingress_queue(dev)) {
1472 q = dev_ingress_queue(dev)->qdisc_sleeping;
1473 }
1474 } else {
1475 q = rtnl_dereference(dev->qdisc);
1476 }
1477 if (!q) {
1478 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1479 return -ENOENT;
1480 }
1481
1482 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1483 NL_SET_ERR_MSG(extack, "Invalid handle");
1484 return -EINVAL;
1485 }
1486 } else {
1487 q = qdisc_lookup(dev, tcm->tcm_handle);
1488 if (!q) {
1489 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1490 return -ENOENT;
1491 }
1492 }
1493
1494 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1495 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1496 return -EINVAL;
1497 }
1498
1499 if (n->nlmsg_type == RTM_DELQDISC) {
1500 if (!clid) {
1501 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1502 return -EINVAL;
1503 }
1504 if (q->handle == 0) {
1505 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1506 return -ENOENT;
1507 }
1508 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1509 if (err != 0)
1510 return err;
1511 } else {
1512 qdisc_notify(net, skb, n, clid, NULL, q);
1513 }
1514 return 0;
1515}
1516
1517/*
1518 * Create/change qdisc.
1519 */
1520
1521static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1522 struct netlink_ext_ack *extack)
1523{
1524 struct net *net = sock_net(skb->sk);
1525 struct tcmsg *tcm;
1526 struct nlattr *tca[TCA_MAX + 1];
1527 struct net_device *dev;
1528 u32 clid;
1529 struct Qdisc *q, *p;
1530 int err;
1531
1532replay:
1533 /* Reinit, just in case something touches this. */
1534 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1535 rtm_tca_policy, extack);
1536 if (err < 0)
1537 return err;
1538
1539 tcm = nlmsg_data(n);
1540 clid = tcm->tcm_parent;
1541 q = p = NULL;
1542
1543 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1544 if (!dev)
1545 return -ENODEV;
1546
1547
1548 if (clid) {
1549 if (clid != TC_H_ROOT) {
1550 if (clid != TC_H_INGRESS) {
1551 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1552 if (!p) {
1553 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1554 return -ENOENT;
1555 }
1556 q = qdisc_leaf(p, clid);
1557 } else if (dev_ingress_queue_create(dev)) {
1558 q = dev_ingress_queue(dev)->qdisc_sleeping;
1559 }
1560 } else {
1561 q = rtnl_dereference(dev->qdisc);
1562 }
1563
1564		/* It may be the default qdisc; ignore it */
1565 if (q && q->handle == 0)
1566 q = NULL;
1567
1568 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1569 if (tcm->tcm_handle) {
1570 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1571 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1572 return -EEXIST;
1573 }
1574 if (TC_H_MIN(tcm->tcm_handle)) {
1575 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1576 return -EINVAL;
1577 }
1578 q = qdisc_lookup(dev, tcm->tcm_handle);
1579 if (!q)
1580 goto create_n_graft;
1581 if (n->nlmsg_flags & NLM_F_EXCL) {
1582 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1583 return -EEXIST;
1584 }
1585 if (tca[TCA_KIND] &&
1586 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1587 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1588 return -EINVAL;
1589 }
1590 if (q == p ||
1591 (p && check_loop(q, p, 0))) {
1592 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1593 return -ELOOP;
1594 }
1595 qdisc_refcount_inc(q);
1596 goto graft;
1597 } else {
1598 if (!q)
1599 goto create_n_graft;
1600
1601				/* This magic test requires explanation.
1602				 *
1603				 * We know that some child q is already
1604				 * attached to this parent and we have a choice:
1605				 * either change it or create/graft a new one.
1606				 *
1607				 * 1. We are allowed to create/graft only
1608				 * if both the CREATE and REPLACE flags are set.
1609				 *
1610				 * 2. If EXCL is set, the requestor meant that
1611				 * the qdisc tcm_handle is not expected
1612				 * to exist, so we choose create/graft too.
1613				 *
1614				 * 3. The last case is when no flags are set.
1615				 * Alas, this is a sort of hole in the API; we
1616				 * cannot decide what to do unambiguously.
1617				 * For now we select create/graft if the
1618				 * user gave a KIND that does not match the existing one.
1619				 */
1620 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1621 (n->nlmsg_flags & NLM_F_REPLACE) &&
1622 ((n->nlmsg_flags & NLM_F_EXCL) ||
1623 (tca[TCA_KIND] &&
1624 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1625 goto create_n_graft;
1626 }
1627 }
1628 } else {
1629 if (!tcm->tcm_handle) {
1630 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1631 return -EINVAL;
1632 }
1633 q = qdisc_lookup(dev, tcm->tcm_handle);
1634 }
1635
1636 /* Change qdisc parameters */
1637 if (!q) {
1638 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1639 return -ENOENT;
1640 }
1641 if (n->nlmsg_flags & NLM_F_EXCL) {
1642 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1643 return -EEXIST;
1644 }
1645 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1646 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1647 return -EINVAL;
1648 }
1649 err = qdisc_change(q, tca, extack);
1650 if (err == 0)
1651 qdisc_notify(net, skb, n, clid, NULL, q);
1652 return err;
1653
1654create_n_graft:
1655 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1656 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1657 return -ENOENT;
1658 }
1659 if (clid == TC_H_INGRESS) {
1660 if (dev_ingress_queue(dev)) {
1661 q = qdisc_create(dev, dev_ingress_queue(dev),
1662 tcm->tcm_parent, tcm->tcm_parent,
1663 tca, &err, extack);
1664 } else {
1665 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1666 err = -ENOENT;
1667 }
1668 } else {
1669 struct netdev_queue *dev_queue;
1670
1671 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1672 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1673 else if (p)
1674 dev_queue = p->dev_queue;
1675 else
1676 dev_queue = netdev_get_tx_queue(dev, 0);
1677
1678 q = qdisc_create(dev, dev_queue,
1679 tcm->tcm_parent, tcm->tcm_handle,
1680 tca, &err, extack);
1681 }
1682 if (q == NULL) {
1683 if (err == -EAGAIN)
1684 goto replay;
1685 return err;
1686 }
1687
1688graft:
1689 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1690 if (err) {
1691 if (q)
1692 qdisc_put(q);
1693 return err;
1694 }
1695
1696 return 0;
1697}
1698
1699static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1700 struct netlink_callback *cb,
1701 int *q_idx_p, int s_q_idx, bool recur,
1702 bool dump_invisible)
1703{
1704 int ret = 0, q_idx = *q_idx_p;
1705 struct Qdisc *q;
1706 int b;
1707
1708 if (!root)
1709 return 0;
1710
1711 q = root;
1712 if (q_idx < s_q_idx) {
1713 q_idx++;
1714 } else {
1715 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1716 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1717 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1718 RTM_NEWQDISC) <= 0)
1719 goto done;
1720 q_idx++;
1721 }
1722
1723	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
1724	 * itself has already been dumped.
1725	 *
1726	 * If we've already dumped the top-level (ingress) qdisc above, we don't
1727	 * want to hit the global qdisc hashtable again.
1728	 */
1729 if (!qdisc_dev(root) || !recur)
1730 goto out;
1731
1732 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1733 if (q_idx < s_q_idx) {
1734 q_idx++;
1735 continue;
1736 }
1737 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1738 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1739 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1740 RTM_NEWQDISC) <= 0)
1741 goto done;
1742 q_idx++;
1743 }
1744
1745out:
1746 *q_idx_p = q_idx;
1747 return ret;
1748done:
1749 ret = -1;
1750 goto out;
1751}
1752
1753static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1754{
1755 struct net *net = sock_net(skb->sk);
1756 int idx, q_idx;
1757 int s_idx, s_q_idx;
1758 struct net_device *dev;
1759 const struct nlmsghdr *nlh = cb->nlh;
1760 struct nlattr *tca[TCA_MAX + 1];
1761 int err;
1762
1763 s_idx = cb->args[0];
1764 s_q_idx = q_idx = cb->args[1];
1765
1766 idx = 0;
1767 ASSERT_RTNL();
1768
1769 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1770 rtm_tca_policy, cb->extack);
1771 if (err < 0)
1772 return err;
1773
1774 for_each_netdev(net, dev) {
1775 struct netdev_queue *dev_queue;
1776
1777 if (idx < s_idx)
1778 goto cont;
1779 if (idx > s_idx)
1780 s_q_idx = 0;
1781 q_idx = 0;
1782
1783 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1784 skb, cb, &q_idx, s_q_idx,
1785 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1786 goto done;
1787
1788 dev_queue = dev_ingress_queue(dev);
1789 if (dev_queue &&
1790 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1791 &q_idx, s_q_idx, false,
1792 tca[TCA_DUMP_INVISIBLE]) < 0)
1793 goto done;
1794
1795cont:
1796 idx++;
1797 }
1798
1799done:
1800 cb->args[0] = idx;
1801 cb->args[1] = q_idx;
1802
1803 return skb->len;
1804}
1805
1806
1807
1808/************************************************
1809 * Traffic classes manipulation. *
1810 ************************************************/
1811
1812static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1813 unsigned long cl,
1814 u32 portid, u32 seq, u16 flags, int event)
1815{
1816 struct tcmsg *tcm;
1817 struct nlmsghdr *nlh;
1818 unsigned char *b = skb_tail_pointer(skb);
1819 struct gnet_dump d;
1820 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1821
1822 cond_resched();
1823 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1824 if (!nlh)
1825 goto out_nlmsg_trim;
1826 tcm = nlmsg_data(nlh);
1827 tcm->tcm_family = AF_UNSPEC;
1828 tcm->tcm__pad1 = 0;
1829 tcm->tcm__pad2 = 0;
1830 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1831 tcm->tcm_parent = q->handle;
1832 tcm->tcm_handle = q->handle;
1833 tcm->tcm_info = 0;
1834 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1835 goto nla_put_failure;
1836 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1837 goto nla_put_failure;
1838
1839 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1840 NULL, &d, TCA_PAD) < 0)
1841 goto nla_put_failure;
1842
1843 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1844 goto nla_put_failure;
1845
1846 if (gnet_stats_finish_copy(&d) < 0)
1847 goto nla_put_failure;
1848
1849 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1850 return skb->len;
1851
1852out_nlmsg_trim:
1853nla_put_failure:
1854 nlmsg_trim(skb, b);
1855 return -1;
1856}
1857
1858static int tclass_notify(struct net *net, struct sk_buff *oskb,
1859 struct nlmsghdr *n, struct Qdisc *q,
1860 unsigned long cl, int event)
1861{
1862 struct sk_buff *skb;
1863 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1864
1865 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1866 if (!skb)
1867 return -ENOBUFS;
1868
1869 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1870 kfree_skb(skb);
1871 return -EINVAL;
1872 }
1873
1874 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1875 n->nlmsg_flags & NLM_F_ECHO);
1876}
1877
1878static int tclass_del_notify(struct net *net,
1879 const struct Qdisc_class_ops *cops,
1880 struct sk_buff *oskb, struct nlmsghdr *n,
1881 struct Qdisc *q, unsigned long cl,
1882 struct netlink_ext_ack *extack)
1883{
1884 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1885 struct sk_buff *skb;
1886 int err = 0;
1887
1888 if (!cops->delete)
1889 return -EOPNOTSUPP;
1890
1891 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1892 if (!skb)
1893 return -ENOBUFS;
1894
1895 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1896 RTM_DELTCLASS) < 0) {
1897 kfree_skb(skb);
1898 return -EINVAL;
1899 }
1900
1901 err = cops->delete(q, cl, extack);
1902 if (err) {
1903 kfree_skb(skb);
1904 return err;
1905 }
1906
1907 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1908 n->nlmsg_flags & NLM_F_ECHO);
1909 return err;
1910}
1911
1912#ifdef CONFIG_NET_CLS
1913
1914struct tcf_bind_args {
1915 struct tcf_walker w;
1916 unsigned long base;
1917 unsigned long cl;
1918 u32 classid;
1919};
1920
1921static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1922{
1923 struct tcf_bind_args *a = (void *)arg;
1924
1925 if (n && tp->ops->bind_class) {
1926 struct Qdisc *q = tcf_block_q(tp->chain->block);
1927
1928 sch_tree_lock(q);
1929 tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
1930 sch_tree_unlock(q);
1931 }
1932 return 0;
1933}
1934
1935struct tc_bind_class_args {
1936 struct qdisc_walker w;
1937 unsigned long new_cl;
1938 u32 portid;
1939 u32 clid;
1940};
1941
1942static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
1943 struct qdisc_walker *w)
1944{
1945 struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
1946 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1947 struct tcf_block *block;
1948 struct tcf_chain *chain;
1949
1950 block = cops->tcf_block(q, cl, NULL);
1951 if (!block)
1952 return 0;
1953 for (chain = tcf_get_next_chain(block, NULL);
1954 chain;
1955 chain = tcf_get_next_chain(block, chain)) {
1956 struct tcf_proto *tp;
1957
1958 for (tp = tcf_get_next_proto(chain, NULL);
1959 tp; tp = tcf_get_next_proto(chain, tp)) {
1960 struct tcf_bind_args arg = {};
1961
1962 arg.w.fn = tcf_node_bind;
1963 arg.classid = a->clid;
1964 arg.base = cl;
1965 arg.cl = a->new_cl;
1966 tp->ops->walk(tp, &arg.w, true);
1967 }
1968 }
1969
1970 return 0;
1971}
1972
1973static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1974 unsigned long new_cl)
1975{
1976 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1977 struct tc_bind_class_args args = {};
1978
1979 if (!cops->tcf_block)
1980 return;
1981 args.portid = portid;
1982 args.clid = clid;
1983 args.new_cl = new_cl;
1984 args.w.fn = tc_bind_class_walker;
1985 q->ops->cl_ops->walk(q, &args.w);
1986}
1987
1988#else
1989
1990static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1991 unsigned long new_cl)
1992{
1993}
1994
1995#endif
1996
1997static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1998 struct netlink_ext_ack *extack)
1999{
2000 struct net *net = sock_net(skb->sk);
2001 struct tcmsg *tcm = nlmsg_data(n);
2002 struct nlattr *tca[TCA_MAX + 1];
2003 struct net_device *dev;
2004 struct Qdisc *q = NULL;
2005 const struct Qdisc_class_ops *cops;
2006 unsigned long cl = 0;
2007 unsigned long new_cl;
2008 u32 portid;
2009 u32 clid;
2010 u32 qid;
2011 int err;
2012
2013 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2014 rtm_tca_policy, extack);
2015 if (err < 0)
2016 return err;
2017
2018 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2019 if (!dev)
2020 return -ENODEV;
2021
2022 /*
2023 parent == TC_H_UNSPEC - unspecified parent.
2024 parent == TC_H_ROOT - class is root, which has no parent.
2025 parent == X:0 - parent is root class.
2026 parent == X:Y - parent is a node in hierarchy.
2027 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
2028
2029 handle == 0:0 - generate handle from kernel pool.
2030 handle == 0:Y - class is X:Y, where X:0 is qdisc.
2031 handle == X:Y - clear.
2032 handle == X:0 - root class.
2033 */
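	/* Example (illustrative): "tc class add dev eth0 parent 1: classid 1:10 ..."
	 * arrives here with tcm_parent == 1:0 and tcm_handle == 1:10, so qid below
	 * resolves to 1:0 and the class being manipulated is 1:10.
	 */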
2034
2035 /* Step 1. Determine qdisc handle X:0 */
2036
2037 portid = tcm->tcm_parent;
2038 clid = tcm->tcm_handle;
2039 qid = TC_H_MAJ(clid);
2040
2041 if (portid != TC_H_ROOT) {
2042 u32 qid1 = TC_H_MAJ(portid);
2043
2044 if (qid && qid1) {
2045 /* If both majors are known, they must be identical. */
2046 if (qid != qid1)
2047 return -EINVAL;
2048 } else if (qid1) {
2049 qid = qid1;
2050 } else if (qid == 0)
2051 qid = rtnl_dereference(dev->qdisc)->handle;
2052
2053		/* Now qid is a genuine qdisc handle consistent
2054		 * with both parent and child.
2055		 *
2056		 * TC_H_MAJ(portid) may still be unspecified; complete it now.
2057		 */
2058 if (portid)
2059 portid = TC_H_MAKE(qid, portid);
2060 } else {
2061 if (qid == 0)
2062 qid = rtnl_dereference(dev->qdisc)->handle;
2063 }
2064
2065 /* OK. Locate qdisc */
2066 q = qdisc_lookup(dev, qid);
2067 if (!q)
2068 return -ENOENT;
2069
2070	/* And check that it supports classes */
2071 cops = q->ops->cl_ops;
2072 if (cops == NULL)
2073 return -EINVAL;
2074
2075 /* Now try to get class */
2076 if (clid == 0) {
2077 if (portid == TC_H_ROOT)
2078 clid = qid;
2079 } else
2080 clid = TC_H_MAKE(qid, clid);
2081
2082 if (clid)
2083 cl = cops->find(q, clid);
2084
2085 if (cl == 0) {
2086 err = -ENOENT;
2087 if (n->nlmsg_type != RTM_NEWTCLASS ||
2088 !(n->nlmsg_flags & NLM_F_CREATE))
2089 goto out;
2090 } else {
2091 switch (n->nlmsg_type) {
2092 case RTM_NEWTCLASS:
2093 err = -EEXIST;
2094 if (n->nlmsg_flags & NLM_F_EXCL)
2095 goto out;
2096 break;
2097 case RTM_DELTCLASS:
2098 err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2099			/* Unbind the class from its filters by rebinding them to class 0 */
2100 tc_bind_tclass(q, portid, clid, 0);
2101 goto out;
2102 case RTM_GETTCLASS:
2103 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
2104 goto out;
2105 default:
2106 err = -EINVAL;
2107 goto out;
2108 }
2109 }
2110
2111 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2112 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2113 return -EOPNOTSUPP;
2114 }
2115
2116 new_cl = cl;
2117 err = -EOPNOTSUPP;
2118 if (cops->change)
2119 err = cops->change(q, clid, portid, tca, &new_cl, extack);
2120 if (err == 0) {
2121 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
2122		/* We just created a new class; we need to do the reverse binding. */
2123 if (cl != new_cl)
2124 tc_bind_tclass(q, portid, clid, new_cl);
2125 }
2126out:
2127 return err;
2128}
2129
2130struct qdisc_dump_args {
2131 struct qdisc_walker w;
2132 struct sk_buff *skb;
2133 struct netlink_callback *cb;
2134};
2135
2136static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2137 struct qdisc_walker *arg)
2138{
2139 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2140
2141 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2142 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2143 RTM_NEWTCLASS);
2144}
2145
2146static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2147 struct tcmsg *tcm, struct netlink_callback *cb,
2148 int *t_p, int s_t)
2149{
2150 struct qdisc_dump_args arg;
2151
2152 if (tc_qdisc_dump_ignore(q, false) ||
2153 *t_p < s_t || !q->ops->cl_ops ||
2154 (tcm->tcm_parent &&
2155 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2156 (*t_p)++;
2157 return 0;
2158 }
2159 if (*t_p > s_t)
2160 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2161 arg.w.fn = qdisc_class_dump;
2162 arg.skb = skb;
2163 arg.cb = cb;
2164 arg.w.stop = 0;
2165 arg.w.skip = cb->args[1];
2166 arg.w.count = 0;
2167 q->ops->cl_ops->walk(q, &arg.w);
2168 cb->args[1] = arg.w.count;
2169 if (arg.w.stop)
2170 return -1;
2171 (*t_p)++;
2172 return 0;
2173}
2174
2175static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2176 struct tcmsg *tcm, struct netlink_callback *cb,
2177 int *t_p, int s_t, bool recur)
2178{
2179 struct Qdisc *q;
2180 int b;
2181
2182 if (!root)
2183 return 0;
2184
2185 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2186 return -1;
2187
2188 if (!qdisc_dev(root) || !recur)
2189 return 0;
2190
2191 if (tcm->tcm_parent) {
2192 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2193 if (q && q != root &&
2194 tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2195 return -1;
2196 return 0;
2197 }
2198 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2199 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2200 return -1;
2201 }
2202
2203 return 0;
2204}
2205
2206static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2207{
2208 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2209 struct net *net = sock_net(skb->sk);
2210 struct netdev_queue *dev_queue;
2211 struct net_device *dev;
2212 int t, s_t;
2213
2214 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2215 return 0;
2216 dev = dev_get_by_index(net, tcm->tcm_ifindex);
2217 if (!dev)
2218 return 0;
2219
2220 s_t = cb->args[0];
2221 t = 0;
2222
2223 if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2224 skb, tcm, cb, &t, s_t, true) < 0)
2225 goto done;
2226
2227 dev_queue = dev_ingress_queue(dev);
2228 if (dev_queue &&
2229 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2230 &t, s_t, false) < 0)
2231 goto done;
2232
2233done:
2234 cb->args[0] = t;
2235
2236 dev_put(dev);
2237 return skb->len;
2238}
2239
2240#ifdef CONFIG_PROC_FS
2241static int psched_show(struct seq_file *seq, void *v)
2242{
2243 seq_printf(seq, "%08x %08x %08x %08x\n",
2244 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2245 1000000,
2246 (u32)NSEC_PER_SEC / hrtimer_resolution);
2247
2248 return 0;
2249}
2250
2251static int __net_init psched_net_init(struct net *net)
2252{
2253 struct proc_dir_entry *e;
2254
2255 e = proc_create_single("psched", 0, net->proc_net, psched_show);
2256 if (e == NULL)
2257 return -ENOMEM;
2258
2259 return 0;
2260}
2261
2262static void __net_exit psched_net_exit(struct net *net)
2263{
2264 remove_proc_entry("psched", net->proc_net);
2265}
2266#else
2267static int __net_init psched_net_init(struct net *net)
2268{
2269 return 0;
2270}
2271
2272static void __net_exit psched_net_exit(struct net *net)
2273{
2274}
2275#endif
2276
2277static struct pernet_operations psched_net_ops = {
2278 .init = psched_net_init,
2279 .exit = psched_net_exit,
2280};
2281
2282DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2283
2284static int __init pktsched_init(void)
2285{
2286 int err;
2287
2288 err = register_pernet_subsys(&psched_net_ops);
2289 if (err) {
2290 pr_err("pktsched_init: "
2291 "cannot initialize per netns operations\n");
2292 return err;
2293 }
2294
2295 register_qdisc(&pfifo_fast_ops);
2296 register_qdisc(&pfifo_qdisc_ops);
2297 register_qdisc(&bfifo_qdisc_ops);
2298 register_qdisc(&pfifo_head_drop_qdisc_ops);
2299 register_qdisc(&mq_qdisc_ops);
2300 register_qdisc(&noqueue_qdisc_ops);
2301
2302 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2303 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2304 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2305 0);
2306 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2307 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2308 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2309 0);
2310
2311 tc_wrapper_init();
2312
2313 return 0;
2314}
2315
2316subsys_initcall(pktsched_init);