1/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/skbuff.h>
24#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
29#include <linux/hrtimer.h>
30#include <linux/lockdep.h>
31#include <linux/slab.h>
32
33#include <net/net_namespace.h>
34#include <net/sock.h>
35#include <net/netlink.h>
36#include <net/pkt_sched.h>
37
38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
40 struct Qdisc *old, struct Qdisc *new);
41static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
44
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
 55 Generally, a queueing discipline ("qdisc") is a black box,
 56 which is able to enqueue packets and to dequeue them (when
 57 the device is ready to send something) in an order and at times
 58 determined by the algorithm hidden inside it.
59
 60 qdiscs are divided into two categories:
 61 - "queues", which have no internal structure visible from outside.
 62 - "schedulers", which split all the packets into "traffic classes",
 63 using "packet classifiers" (see cls_api.c).
 64
 65 In turn, classes may have child qdiscs (as a rule, queues)
 66 attached to them, etc.
67
 68 The goal of the routines in this file is to translate
 69 the information supplied by the user in the form of handles
 70 into a form more intelligible to the kernel, to perform some
 71 sanity checks and the part of the work that is common to all
 72 qdiscs, and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
 82 dequeue usually returns an skb to send. It is allowed to return NULL,
 83 but that does not mean the queue is empty; it just means that the
 84 discipline does not want to send anything this time.
 85 The queue is really empty if q->q.qlen == 0.
 86 For complicated disciplines with multiple queues, q->q is not the
 87 real packet queue; nevertheless, q->q.qlen must be valid.
88
89 ---enqueue
90
 91 enqueue returns 0 if the packet was enqueued successfully.
 92 If a packet (this one or another one) was dropped, it returns
 93 a non-zero error code:
 94 NET_XMIT_DROP - this packet was dropped.
 95 Expected action: do not back off, but wait until the queue clears.
 96 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
 97 Expected action: back off or ignore.
 98 NET_XMIT_POLICED - dropped by the policer.
 99 Expected action: back off or report an error to real-time apps.
100
101 Auxiliary routines:
102
103 ---peek
104
105 like dequeue but without removing a packet from the queue
106
107 ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
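
/*
 * Illustrative sketch (hypothetical, not part of this file): the smallest
 * possible work-conserving qdisc that follows the enqueue/dequeue contract
 * described above, modelled loosely on sch_fifo.c and pfifo_fast.  All
 * "example_*" names are made up.
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		// Returning 0 means "enqueued"; any other value means a
 *		// packet was dropped (see the NET_XMIT_* codes above).
 *		if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch);
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		// NULL only means "nothing to send right now"; emptiness
 *		// is judged by sch->q.qlen == 0.
 *		return qdisc_dequeue_head(sch);
 *	}
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 */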
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
138/* Register/unregister queueing discipline */
139
140int register_qdisc(struct Qdisc_ops *qops)
141{
142 struct Qdisc_ops *q, **qp;
143 int rc = -EEXIST;
144
145 write_lock(&qdisc_mod_lock);
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
147 if (!strcmp(qops->id, q->id))
148 goto out;
149
150 if (qops->enqueue == NULL)
151 qops->enqueue = noop_qdisc_ops.enqueue;
152 if (qops->peek == NULL) {
153 if (qops->dequeue == NULL)
154 qops->peek = noop_qdisc_ops.peek;
155 else
156 goto out_einval;
157 }
158 if (qops->dequeue == NULL)
159 qops->dequeue = noop_qdisc_ops.dequeue;
160
161 if (qops->cl_ops) {
162 const struct Qdisc_class_ops *cops = qops->cl_ops;
163
164 if (!(cops->get && cops->put && cops->walk && cops->leaf))
165 goto out_einval;
166
167 if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 goto out_einval;
169 }
170
171 qops->next = NULL;
172 *qp = qops;
173 rc = 0;
174out:
175 write_unlock(&qdisc_mod_lock);
176 return rc;
177
178out_einval:
179 rc = -EINVAL;
180 goto out;
181}
182EXPORT_SYMBOL(register_qdisc);
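
/*
 * Typical use (sketch; "example_qdisc_ops" is a hypothetical ops table, not
 * code from this file): a scheduler module registers its ops from its init
 * hook and unregisters them on exit:
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_module_init);
 *	module_exit(example_module_exit);
 *
 * register_qdisc() returns -EEXIST if an ops table with the same ->id is
 * already registered, and -EINVAL if a mandatory callback is missing.
 */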
183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
201EXPORT_SYMBOL(unregister_qdisc);
202
203/* Get default qdisc if not otherwise specified */
204void qdisc_get_default(char *name, size_t len)
205{
206 read_lock(&qdisc_mod_lock);
207 strlcpy(name, default_qdisc_ops->id, len);
208 read_unlock(&qdisc_mod_lock);
209}
210
211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212{
213 struct Qdisc_ops *q = NULL;
214
215 for (q = qdisc_base; q; q = q->next) {
216 if (!strcmp(name, q->id)) {
217 if (!try_module_get(q->owner))
218 q = NULL;
219 break;
220 }
221 }
222
223 return q;
224}
225
226/* Set new default qdisc to use */
227int qdisc_set_default(const char *name)
228{
229 const struct Qdisc_ops *ops;
230
231 if (!capable(CAP_NET_ADMIN))
232 return -EPERM;
233
234 write_lock(&qdisc_mod_lock);
235 ops = qdisc_lookup_default(name);
236 if (!ops) {
237 /* Not found, drop lock and try to load module */
238 write_unlock(&qdisc_mod_lock);
239 request_module("sch_%s", name);
240 write_lock(&qdisc_mod_lock);
241
242 ops = qdisc_lookup_default(name);
243 }
244
245 if (ops) {
246 /* Set new default */
247 module_put(default_qdisc_ops->owner);
248 default_qdisc_ops = ops;
249 }
250 write_unlock(&qdisc_mod_lock);
251
252 return ops ? 0 : -ENOENT;
253}
254
255/* We know the handle. Find the qdisc among all qdiscs attached to the device
256 * (root qdisc, all its children, children of children etc.)
257 * Note: caller either uses rtnl or rcu_read_lock()
258 */
259
260static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
261{
262 struct Qdisc *q;
263
264 if (!(root->flags & TCQ_F_BUILTIN) &&
265 root->handle == handle)
266 return root;
267
268 list_for_each_entry_rcu(q, &root->list, list) {
269 if (q->handle == handle)
270 return q;
271 }
272 return NULL;
273}
274
275void qdisc_list_add(struct Qdisc *q)
276{
277 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
278 struct Qdisc *root = qdisc_dev(q)->qdisc;
279
280 WARN_ON_ONCE(root == &noop_qdisc);
281 ASSERT_RTNL();
282 list_add_tail_rcu(&q->list, &root->list);
283 }
284}
285EXPORT_SYMBOL(qdisc_list_add);
286
287void qdisc_list_del(struct Qdisc *q)
288{
289 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
290 ASSERT_RTNL();
291 list_del_rcu(&q->list);
292 }
293}
294EXPORT_SYMBOL(qdisc_list_del);
295
296struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
297{
298 struct Qdisc *q;
299
300 q = qdisc_match_from_root(dev->qdisc, handle);
301 if (q)
302 goto out;
303
304 if (dev_ingress_queue(dev))
305 q = qdisc_match_from_root(
306 dev_ingress_queue(dev)->qdisc_sleeping,
307 handle);
308out:
309 return q;
310}
311
312static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
313{
314 unsigned long cl;
315 struct Qdisc *leaf;
316 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
317
318 if (cops == NULL)
319 return NULL;
320 cl = cops->get(p, classid);
321
322 if (cl == 0)
323 return NULL;
324 leaf = cops->leaf(p, cl);
325 cops->put(p, cl);
326 return leaf;
327}
328
329/* Find queueing discipline by name */
330
331static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
332{
333 struct Qdisc_ops *q = NULL;
334
335 if (kind) {
336 read_lock(&qdisc_mod_lock);
337 for (q = qdisc_base; q; q = q->next) {
338 if (nla_strcmp(kind, q->id) == 0) {
339 if (!try_module_get(q->owner))
340 q = NULL;
341 break;
342 }
343 }
344 read_unlock(&qdisc_mod_lock);
345 }
346 return q;
347}
348
349/* The linklayer setting was not transferred from iproute2 in older
350 * versions, and the rate table lookup system has been dropped from
351 * the kernel. To stay backward compatible with older iproute2 tc
352 * utilities, we detect the linklayer setting by checking whether the
353 * rate table was modified.
354 *
355 * For linklayer ATM table entries, the rate table will be aligned to
356 * 48 bytes, thus some table entries will contain the same value. The
357 * mpu (min packet unit) is also encoded into the old rate table, thus
358 * starting from the mpu, we find the low and high table entries
359 * mapping this cell. If these entries contain the same value, then
360 * the rate table has been modified for linklayer ATM.
361 *
362 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
363 * then rounding up to the next cell, calculating the table entry one
364 * below, and comparing the two.
365 */
366static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
367{
368 int low = roundup(r->mpu, 48);
369 int high = roundup(low+1, 48);
370 int cell_low = low >> r->cell_log;
371 int cell_high = (high >> r->cell_log) - 1;
372
373 /* rtab is too inaccurate at rates > 100Mbit/s */
374 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
375 pr_debug("TC linklayer: Giving up ATM detection\n");
376 return TC_LINKLAYER_ETHERNET;
377 }
378
379 if ((cell_high > cell_low) && (cell_high < 256)
380 && (rtab[cell_low] == rtab[cell_high])) {
381 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
382 cell_low, cell_high, rtab[cell_high]);
383 return TC_LINKLAYER_ATM;
384 }
385 return TC_LINKLAYER_ETHERNET;
386}
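
/*
 * Worked example (illustrative numbers): with mpu == 0 and cell_log == 3,
 * low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, giving
 * cell_low = 0 and cell_high = (48 >> 3) - 1 = 5.  Entries 0..5 of the
 * rate table all describe packet sizes of at most 48 bytes, i.e. a single
 * ATM cell, so a table built by iproute2 for linklayer ATM stores the same
 * transmit time in rtab[0] and rtab[5], while an Ethernet-aligned table
 * normally stores a smaller time for 8 bytes than for 48 bytes.  Above
 * roughly 100 Mbit/s the per-entry times become too coarse to tell the two
 * apart, which is why the function gives up and assumes Ethernet there.
 */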
387
388static struct qdisc_rate_table *qdisc_rtab_list;
389
390struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
391{
392 struct qdisc_rate_table *rtab;
393
394 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
395 nla_len(tab) != TC_RTAB_SIZE)
396 return NULL;
397
398 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
399 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
400 !memcmp(&rtab->data, nla_data(tab), 1024)) {
401 rtab->refcnt++;
402 return rtab;
403 }
404 }
405
406 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
407 if (rtab) {
408 rtab->rate = *r;
409 rtab->refcnt = 1;
410 memcpy(rtab->data, nla_data(tab), 1024);
411 if (r->linklayer == TC_LINKLAYER_UNAWARE)
412 r->linklayer = __detect_linklayer(r, rtab->data);
413 rtab->next = qdisc_rtab_list;
414 qdisc_rtab_list = rtab;
415 }
416 return rtab;
417}
418EXPORT_SYMBOL(qdisc_get_rtab);
419
420void qdisc_put_rtab(struct qdisc_rate_table *tab)
421{
422 struct qdisc_rate_table *rtab, **rtabp;
423
424 if (!tab || --tab->refcnt)
425 return;
426
427 for (rtabp = &qdisc_rtab_list;
428 (rtab = *rtabp) != NULL;
429 rtabp = &rtab->next) {
430 if (rtab == tab) {
431 *rtabp = rtab->next;
432 kfree(rtab);
433 return;
434 }
435 }
436}
437EXPORT_SYMBOL(qdisc_put_rtab);
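
/*
 * Note on use (sketch): rtab->data[] holds 256 transmit times, in psched
 * ticks, indexed by packet size shifted right by cell_log.  Qdiscs that
 * still accept an old-style rate table (e.g. CBQ or the policer) typically
 * look times up via the qdisc_l2t() helper from <net/pkt_sched.h>, along
 * the lines of:
 *
 *	u32 ticks = qdisc_l2t(rtab, qdisc_pkt_len(skb));
 *
 * The tables are shared: qdisc_get_rtab() above returns an existing entry
 * with an incremented refcount when both the tc_ratespec and the 1024
 * bytes of table data match.
 */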
438
439static LIST_HEAD(qdisc_stab_list);
440static DEFINE_SPINLOCK(qdisc_stab_lock);
441
442static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
443 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
444 [TCA_STAB_DATA] = { .type = NLA_BINARY },
445};
446
447static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
448{
449 struct nlattr *tb[TCA_STAB_MAX + 1];
450 struct qdisc_size_table *stab;
451 struct tc_sizespec *s;
452 unsigned int tsize = 0;
453 u16 *tab = NULL;
454 int err;
455
456 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
457 if (err < 0)
458 return ERR_PTR(err);
459 if (!tb[TCA_STAB_BASE])
460 return ERR_PTR(-EINVAL);
461
462 s = nla_data(tb[TCA_STAB_BASE]);
463
464 if (s->tsize > 0) {
465 if (!tb[TCA_STAB_DATA])
466 return ERR_PTR(-EINVAL);
467 tab = nla_data(tb[TCA_STAB_DATA]);
468 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
469 }
470
471 if (tsize != s->tsize || (!tab && tsize > 0))
472 return ERR_PTR(-EINVAL);
473
474 spin_lock(&qdisc_stab_lock);
475
476 list_for_each_entry(stab, &qdisc_stab_list, list) {
477 if (memcmp(&stab->szopts, s, sizeof(*s)))
478 continue;
479 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
480 continue;
481 stab->refcnt++;
482 spin_unlock(&qdisc_stab_lock);
483 return stab;
484 }
485
486 spin_unlock(&qdisc_stab_lock);
487
488 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
489 if (!stab)
490 return ERR_PTR(-ENOMEM);
491
492 stab->refcnt = 1;
493 stab->szopts = *s;
494 if (tsize > 0)
495 memcpy(stab->data, tab, tsize * sizeof(u16));
496
497 spin_lock(&qdisc_stab_lock);
498 list_add_tail(&stab->list, &qdisc_stab_list);
499 spin_unlock(&qdisc_stab_lock);
500
501 return stab;
502}
503
504static void stab_kfree_rcu(struct rcu_head *head)
505{
506 kfree(container_of(head, struct qdisc_size_table, rcu));
507}
508
509void qdisc_put_stab(struct qdisc_size_table *tab)
510{
511 if (!tab)
512 return;
513
514 spin_lock(&qdisc_stab_lock);
515
516 if (--tab->refcnt == 0) {
517 list_del(&tab->list);
518 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
519 }
520
521 spin_unlock(&qdisc_stab_lock);
522}
523EXPORT_SYMBOL(qdisc_put_stab);
524
525static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
526{
527 struct nlattr *nest;
528
529 nest = nla_nest_start(skb, TCA_STAB);
530 if (nest == NULL)
531 goto nla_put_failure;
532 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
533 goto nla_put_failure;
534 nla_nest_end(skb, nest);
535
536 return skb->len;
537
538nla_put_failure:
539 return -1;
540}
541
542void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
543{
544 int pkt_len, slot;
545
546 pkt_len = skb->len + stab->szopts.overhead;
547 if (unlikely(!stab->szopts.tsize))
548 goto out;
549
550 slot = pkt_len + stab->szopts.cell_align;
551 if (unlikely(slot < 0))
552 slot = 0;
553
554 slot >>= stab->szopts.cell_log;
555 if (likely(slot < stab->szopts.tsize))
556 pkt_len = stab->data[slot];
557 else
558 pkt_len = stab->data[stab->szopts.tsize - 1] *
559 (slot / stab->szopts.tsize) +
560 stab->data[slot % stab->szopts.tsize];
561
562 pkt_len <<= stab->szopts.size_log;
563out:
564 if (unlikely(pkt_len < 1))
565 pkt_len = 1;
566 qdisc_skb_cb(skb)->pkt_len = pkt_len;
567}
568EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
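
/*
 * Worked example (made-up table): assume a size table with overhead = 0,
 * cell_align = 0, cell_log = 6, size_log = 6, tsize = 512 and
 * data[i] = i + 1, i.e. "round up to the next 64-byte cell".  For a
 * 1000-byte skb: slot = 1000 >> 6 = 15, pkt_len = data[15] = 16, and after
 * the final shift pkt_len = 16 << 6 = 1024.  The packet is then accounted
 * as 1024 bytes by every qdisc that honours the stab, which is how tc's
 * "stab" option models padding on cell-based link layers.
 */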
569
570void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
571{
572 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
573 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
574 txt, qdisc->ops->id, qdisc->handle >> 16);
575 qdisc->flags |= TCQ_F_WARN_NONWC;
576 }
577}
578EXPORT_SYMBOL(qdisc_warn_nonwc);
579
580static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
581{
582 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
583 timer);
584
585 rcu_read_lock();
586 qdisc_unthrottled(wd->qdisc);
587 __netif_schedule(qdisc_root(wd->qdisc));
588 rcu_read_unlock();
589
590 return HRTIMER_NORESTART;
591}
592
593void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
594{
595 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
596 wd->timer.function = qdisc_watchdog;
597 wd->qdisc = qdisc;
598}
599EXPORT_SYMBOL(qdisc_watchdog_init);
600
601void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
602{
603 if (test_bit(__QDISC_STATE_DEACTIVATED,
604 &qdisc_root_sleeping(wd->qdisc)->state))
605 return;
606
607 if (throttle)
608 qdisc_throttled(wd->qdisc);
609
610 hrtimer_start(&wd->timer,
611 ns_to_ktime(expires),
612 HRTIMER_MODE_ABS_PINNED);
613}
614EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
615
616void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
617{
618 hrtimer_cancel(&wd->timer);
619 qdisc_unthrottled(wd->qdisc);
620}
621EXPORT_SYMBOL(qdisc_watchdog_cancel);
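
/*
 * Usage sketch (hypothetical shaper, following the pattern of sch_tbf.c):
 * a non-work-conserving qdisc embeds a struct qdisc_watchdog in its
 * private data, arms it when the head packet may not be sent yet, and
 * cancels it in ->reset()/->destroy():
 *
 *	// in ->init()
 *	qdisc_watchdog_init(&q->watchdog, sch);
 *
 *	// in ->dequeue(), when the head packet is not yet eligible
 *	qdisc_watchdog_schedule_ns(&q->watchdog, next_send_time_ns, true);
 *	return NULL;
 *
 *	// in ->reset() / ->destroy()
 *	qdisc_watchdog_cancel(&q->watchdog);
 *
 * When the hrtimer fires, qdisc_watchdog() clears the throttled state and
 * reschedules the root qdisc via __netif_schedule(), so ->dequeue() gets
 * called again.
 */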
622
623static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
624{
625 unsigned int size = n * sizeof(struct hlist_head), i;
626 struct hlist_head *h;
627
628 if (size <= PAGE_SIZE)
629 h = kmalloc(size, GFP_KERNEL);
630 else
631 h = (struct hlist_head *)
632 __get_free_pages(GFP_KERNEL, get_order(size));
633
634 if (h != NULL) {
635 for (i = 0; i < n; i++)
636 INIT_HLIST_HEAD(&h[i]);
637 }
638 return h;
639}
640
641static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
642{
643 unsigned int size = n * sizeof(struct hlist_head);
644
645 if (size <= PAGE_SIZE)
646 kfree(h);
647 else
648 free_pages((unsigned long)h, get_order(size));
649}
650
651void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
652{
653 struct Qdisc_class_common *cl;
654 struct hlist_node *next;
655 struct hlist_head *nhash, *ohash;
656 unsigned int nsize, nmask, osize;
657 unsigned int i, h;
658
659 /* Rehash when load factor exceeds 0.75 */
660 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
661 return;
662 nsize = clhash->hashsize * 2;
663 nmask = nsize - 1;
664 nhash = qdisc_class_hash_alloc(nsize);
665 if (nhash == NULL)
666 return;
667
668 ohash = clhash->hash;
669 osize = clhash->hashsize;
670
671 sch_tree_lock(sch);
672 for (i = 0; i < osize; i++) {
673 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
674 h = qdisc_class_hash(cl->classid, nmask);
675 hlist_add_head(&cl->hnode, &nhash[h]);
676 }
677 }
678 clhash->hash = nhash;
679 clhash->hashsize = nsize;
680 clhash->hashmask = nmask;
681 sch_tree_unlock(sch);
682
683 qdisc_class_hash_free(ohash, osize);
684}
685EXPORT_SYMBOL(qdisc_class_hash_grow);
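
/*
 * Usage sketch (names hypothetical): a classful qdisc keeps a
 * Qdisc_class_hash in its private data, initialises it from ->init() with
 * qdisc_class_hash_init(), and when creating a class does roughly
 *
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	qdisc_class_hash_grow(sch, &q->clhash);
 *
 * sch_htb.c and sch_hfsc.c follow this pattern.  With the initial size of
 * 4 buckets, the "load factor 0.75" test above first triggers once the
 * fourth class has been inserted (4 * 4 > 4 * 3), doubling the table to
 * 8 buckets under the qdisc tree lock.
 */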
686
687int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
688{
689 unsigned int size = 4;
690
691 clhash->hash = qdisc_class_hash_alloc(size);
692 if (clhash->hash == NULL)
693 return -ENOMEM;
694 clhash->hashsize = size;
695 clhash->hashmask = size - 1;
696 clhash->hashelems = 0;
697 return 0;
698}
699EXPORT_SYMBOL(qdisc_class_hash_init);
700
701void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
702{
703 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
704}
705EXPORT_SYMBOL(qdisc_class_hash_destroy);
706
707void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
708 struct Qdisc_class_common *cl)
709{
710 unsigned int h;
711
712 INIT_HLIST_NODE(&cl->hnode);
713 h = qdisc_class_hash(cl->classid, clhash->hashmask);
714 hlist_add_head(&cl->hnode, &clhash->hash[h]);
715 clhash->hashelems++;
716}
717EXPORT_SYMBOL(qdisc_class_hash_insert);
718
719void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
720 struct Qdisc_class_common *cl)
721{
722 hlist_del(&cl->hnode);
723 clhash->hashelems--;
724}
725EXPORT_SYMBOL(qdisc_class_hash_remove);
726
727/* Allocate a unique handle from the space managed by the kernel
728 * Possible range is [8000-FFFF]:0000 (0x8000 values)
729 */
730static u32 qdisc_alloc_handle(struct net_device *dev)
731{
732 int i = 0x8000;
733 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
734
735 do {
736 autohandle += TC_H_MAKE(0x10000U, 0);
737 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
738 autohandle = TC_H_MAKE(0x80000000U, 0);
739 if (!qdisc_lookup(dev, autohandle))
740 return autohandle;
741 cond_resched();
742 } while (--i > 0);
743
744 return 0;
745}
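
/*
 * Handle arithmetic, for reference: a qdisc or class handle is a 32-bit
 * value whose upper 16 bits are the "major" (qdisc) number and whose lower
 * 16 bits are the "minor" (class) number, split with TC_H_MAJ()/TC_H_MIN()
 * and combined with TC_H_MAKE().  The first handle returned above is
 * 0x80010000, which tc prints as "8001:"; the allocator then walks 8002:,
 * 8003:, and so on, wrapping back into the kernel-managed range when it
 * would collide with TC_H_ROOT.
 */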
746
747void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
748 unsigned int len)
749{
750 const struct Qdisc_class_ops *cops;
751 unsigned long cl;
752 u32 parentid;
753 int drops;
754
755 if (n == 0 && len == 0)
756 return;
757 drops = max_t(int, n, 0);
758 rcu_read_lock();
759 while ((parentid = sch->parent)) {
760 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
761 break;
762
763 if (sch->flags & TCQ_F_NOPARENT)
764 break;
765 /* TODO: perform the search on a per txq basis */
766 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
767 if (sch == NULL) {
768 WARN_ON_ONCE(parentid != TC_H_ROOT);
769 break;
770 }
771 cops = sch->ops->cl_ops;
772 if (cops->qlen_notify) {
773 cl = cops->get(sch, parentid);
774 cops->qlen_notify(sch, cl);
775 cops->put(sch, cl);
776 }
777 sch->q.qlen -= n;
778 sch->qstats.backlog -= len;
779 __qdisc_qstats_drop(sch, drops);
780 }
781 rcu_read_unlock();
782}
783EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
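
/*
 * Usage sketch: a qdisc that drops or removes packets outside of its own
 * ->enqueue() path (for instance when a ->change() shrinks its limit, or
 * when a class is deleted) must tell its ancestors so that their qlen and
 * backlog counters stay consistent, e.g.
 *
 *	qdisc_tree_reduce_backlog(sch, dropped_packets, dropped_bytes);
 *
 * Drops that happen inside ->enqueue() are already reported to the parent
 * through the NET_XMIT_* return value and must not be counted again here.
 */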
784
785static void notify_and_destroy(struct net *net, struct sk_buff *skb,
786 struct nlmsghdr *n, u32 clid,
787 struct Qdisc *old, struct Qdisc *new)
788{
789 if (new || old)
790 qdisc_notify(net, skb, n, clid, old, new);
791
792 if (old)
793 qdisc_destroy(old);
794}
795
796/* Graft qdisc "new" to class "classid" of qdisc "parent" or
797 * to device "dev".
798 *
799 * When appropriate, send a netlink notification using 'skb'
800 * and 'n'.
801 *
802 * On success, destroy old qdisc.
803 */
804
805static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
806 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
807 struct Qdisc *new, struct Qdisc *old)
808{
809 struct Qdisc *q = old;
810 struct net *net = dev_net(dev);
811 int err = 0;
812
813 if (parent == NULL) {
814 unsigned int i, num_q, ingress;
815
816 ingress = 0;
817 num_q = dev->num_tx_queues;
818 if ((q && q->flags & TCQ_F_INGRESS) ||
819 (new && new->flags & TCQ_F_INGRESS)) {
820 num_q = 1;
821 ingress = 1;
822 if (!dev_ingress_queue(dev))
823 return -ENOENT;
824 }
825
826 if (dev->flags & IFF_UP)
827 dev_deactivate(dev);
828
829 if (new && new->ops->attach)
830 goto skip;
831
832 for (i = 0; i < num_q; i++) {
833 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
834
835 if (!ingress)
836 dev_queue = netdev_get_tx_queue(dev, i);
837
838 old = dev_graft_qdisc(dev_queue, new);
839 if (new && i > 0)
840 atomic_inc(&new->refcnt);
841
842 if (!ingress)
843 qdisc_destroy(old);
844 }
845
846skip:
847 if (!ingress) {
848 notify_and_destroy(net, skb, n, classid,
849 dev->qdisc, new);
850 if (new && !new->ops->attach)
851 atomic_inc(&new->refcnt);
852 dev->qdisc = new ? : &noop_qdisc;
853
854 if (new && new->ops->attach)
855 new->ops->attach(new);
856 } else {
857 notify_and_destroy(net, skb, n, classid, old, new);
858 }
859
860 if (dev->flags & IFF_UP)
861 dev_activate(dev);
862 } else {
863 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
864
865 err = -EOPNOTSUPP;
866 if (cops && cops->graft) {
867 unsigned long cl = cops->get(parent, classid);
868 if (cl) {
869 err = cops->graft(parent, cl, new, &old);
870 cops->put(parent, cl);
871 } else
872 err = -ENOENT;
873 }
874 if (!err)
875 notify_and_destroy(net, skb, n, classid, old, new);
876 }
877 return err;
878}
879
880/* lockdep annotation is needed for ingress; egress gets it only for name */
881static struct lock_class_key qdisc_tx_lock;
882static struct lock_class_key qdisc_rx_lock;
883
884/*
885 Allocate and initialize new qdisc.
886
887 Parameters are passed via opt.
888 */
889
890static struct Qdisc *
891qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
892 struct Qdisc *p, u32 parent, u32 handle,
893 struct nlattr **tca, int *errp)
894{
895 int err;
896 struct nlattr *kind = tca[TCA_KIND];
897 struct Qdisc *sch;
898 struct Qdisc_ops *ops;
899 struct qdisc_size_table *stab;
900
901 ops = qdisc_lookup_ops(kind);
902#ifdef CONFIG_MODULES
903 if (ops == NULL && kind != NULL) {
904 char name[IFNAMSIZ];
905 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
906 /* We dropped the RTNL semaphore in order to
907 * perform the module load. So, even if we
908 * succeeded in loading the module we have to
909 * tell the caller to replay the request. We
910 * indicate this using -EAGAIN.
911 * We replay the request because the device may
912 * go away in the mean time.
913 */
914 rtnl_unlock();
915 request_module("sch_%s", name);
916 rtnl_lock();
917 ops = qdisc_lookup_ops(kind);
918 if (ops != NULL) {
919 /* We will try again qdisc_lookup_ops,
920 * so don't keep a reference.
921 */
922 module_put(ops->owner);
923 err = -EAGAIN;
924 goto err_out;
925 }
926 }
927 }
928#endif
929
930 err = -ENOENT;
931 if (ops == NULL)
932 goto err_out;
933
934 sch = qdisc_alloc(dev_queue, ops);
935 if (IS_ERR(sch)) {
936 err = PTR_ERR(sch);
937 goto err_out2;
938 }
939
940 sch->parent = parent;
941
942 if (handle == TC_H_INGRESS) {
943 sch->flags |= TCQ_F_INGRESS;
944 handle = TC_H_MAKE(TC_H_INGRESS, 0);
945 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
946 } else {
947 if (handle == 0) {
948 handle = qdisc_alloc_handle(dev);
949 err = -ENOMEM;
950 if (handle == 0)
951 goto err_out3;
952 }
953 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
954 if (!netif_is_multiqueue(dev))
955 sch->flags |= TCQ_F_ONETXQUEUE;
956 }
957
958 sch->handle = handle;
959
960 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
961 if (qdisc_is_percpu_stats(sch)) {
962 sch->cpu_bstats =
963 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
964 if (!sch->cpu_bstats)
965 goto err_out4;
966
967 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
968 if (!sch->cpu_qstats)
969 goto err_out4;
970 }
971
972 if (tca[TCA_STAB]) {
973 stab = qdisc_get_stab(tca[TCA_STAB]);
974 if (IS_ERR(stab)) {
975 err = PTR_ERR(stab);
976 goto err_out4;
977 }
978 rcu_assign_pointer(sch->stab, stab);
979 }
980 if (tca[TCA_RATE]) {
981 spinlock_t *root_lock;
982
983 err = -EOPNOTSUPP;
984 if (sch->flags & TCQ_F_MQROOT)
985 goto err_out4;
986
987 if ((sch->parent != TC_H_ROOT) &&
988 !(sch->flags & TCQ_F_INGRESS) &&
989 (!p || !(p->flags & TCQ_F_MQROOT)))
990 root_lock = qdisc_root_sleeping_lock(sch);
991 else
992 root_lock = qdisc_lock(sch);
993
994 err = gen_new_estimator(&sch->bstats,
995 sch->cpu_bstats,
996 &sch->rate_est,
997 root_lock,
998 tca[TCA_RATE]);
999 if (err)
1000 goto err_out4;
1001 }
1002
1003 qdisc_list_add(sch);
1004
1005 return sch;
1006 }
1007err_out3:
1008 dev_put(dev);
1009 kfree((char *) sch - sch->padded);
1010err_out2:
1011 module_put(ops->owner);
1012err_out:
1013 *errp = err;
1014 return NULL;
1015
1016err_out4:
1017 free_percpu(sch->cpu_bstats);
1018 free_percpu(sch->cpu_qstats);
1019 /*
1020 * Any broken qdiscs that would require a ops->reset() here?
1021 * The qdisc was never in action so it shouldn't be necessary.
1022 */
1023 qdisc_put_stab(rtnl_dereference(sch->stab));
1024 if (ops->destroy)
1025 ops->destroy(sch);
1026 goto err_out3;
1027}
1028
1029static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1030{
1031 struct qdisc_size_table *ostab, *stab = NULL;
1032 int err = 0;
1033
1034 if (tca[TCA_OPTIONS]) {
1035 if (sch->ops->change == NULL)
1036 return -EINVAL;
1037 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1038 if (err)
1039 return err;
1040 }
1041
1042 if (tca[TCA_STAB]) {
1043 stab = qdisc_get_stab(tca[TCA_STAB]);
1044 if (IS_ERR(stab))
1045 return PTR_ERR(stab);
1046 }
1047
1048 ostab = rtnl_dereference(sch->stab);
1049 rcu_assign_pointer(sch->stab, stab);
1050 qdisc_put_stab(ostab);
1051
1052 if (tca[TCA_RATE]) {
1053 /* NB: ignores errors from replace_estimator
1054 because change can't be undone. */
1055 if (sch->flags & TCQ_F_MQROOT)
1056 goto out;
1057 gen_replace_estimator(&sch->bstats,
1058 sch->cpu_bstats,
1059 &sch->rate_est,
1060 qdisc_root_sleeping_lock(sch),
1061 tca[TCA_RATE]);
1062 }
1063out:
1064 return 0;
1065}
1066
1067struct check_loop_arg {
1068 struct qdisc_walker w;
1069 struct Qdisc *p;
1070 int depth;
1071};
1072
1073static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1074
1075static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1076{
1077 struct check_loop_arg arg;
1078
1079 if (q->ops->cl_ops == NULL)
1080 return 0;
1081
1082 arg.w.stop = arg.w.skip = arg.w.count = 0;
1083 arg.w.fn = check_loop_fn;
1084 arg.depth = depth;
1085 arg.p = p;
1086 q->ops->cl_ops->walk(q, &arg.w);
1087 return arg.w.stop ? -ELOOP : 0;
1088}
1089
1090static int
1091check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1092{
1093 struct Qdisc *leaf;
1094 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1095 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1096
1097 leaf = cops->leaf(q, cl);
1098 if (leaf) {
1099 if (leaf == arg->p || arg->depth > 7)
1100 return -ELOOP;
1101 return check_loop(leaf, arg->p, arg->depth + 1);
1102 }
1103 return 0;
1104}
1105
1106/*
1107 * Delete/get qdisc.
1108 */
1109
1110static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1111{
1112 struct net *net = sock_net(skb->sk);
1113 struct tcmsg *tcm = nlmsg_data(n);
1114 struct nlattr *tca[TCA_MAX + 1];
1115 struct net_device *dev;
1116 u32 clid;
1117 struct Qdisc *q = NULL;
1118 struct Qdisc *p = NULL;
1119 int err;
1120
1121 if ((n->nlmsg_type != RTM_GETQDISC) &&
1122 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1123 return -EPERM;
1124
1125 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1126 if (err < 0)
1127 return err;
1128
1129 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1130 if (!dev)
1131 return -ENODEV;
1132
1133 clid = tcm->tcm_parent;
1134 if (clid) {
1135 if (clid != TC_H_ROOT) {
1136 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1137 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1138 if (!p)
1139 return -ENOENT;
1140 q = qdisc_leaf(p, clid);
1141 } else if (dev_ingress_queue(dev)) {
1142 q = dev_ingress_queue(dev)->qdisc_sleeping;
1143 }
1144 } else {
1145 q = dev->qdisc;
1146 }
1147 if (!q)
1148 return -ENOENT;
1149
1150 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1151 return -EINVAL;
1152 } else {
1153 q = qdisc_lookup(dev, tcm->tcm_handle);
1154 if (!q)
1155 return -ENOENT;
1156 }
1157
1158 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1159 return -EINVAL;
1160
1161 if (n->nlmsg_type == RTM_DELQDISC) {
1162 if (!clid)
1163 return -EINVAL;
1164 if (q->handle == 0)
1165 return -ENOENT;
1166 err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1167 if (err != 0)
1168 return err;
1169 } else {
1170 qdisc_notify(net, skb, n, clid, NULL, q);
1171 }
1172 return 0;
1173}
1174
1175/*
1176 * Create/change qdisc.
1177 */
1178
1179static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1180{
1181 struct net *net = sock_net(skb->sk);
1182 struct tcmsg *tcm;
1183 struct nlattr *tca[TCA_MAX + 1];
1184 struct net_device *dev;
1185 u32 clid;
1186 struct Qdisc *q, *p;
1187 int err;
1188
1189 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1190 return -EPERM;
1191
1192replay:
1193 /* Reinit, just in case something touches this. */
1194 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1195 if (err < 0)
1196 return err;
1197
1198 tcm = nlmsg_data(n);
1199 clid = tcm->tcm_parent;
1200 q = p = NULL;
1201
1202 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1203 if (!dev)
1204 return -ENODEV;
1205
1206
1207 if (clid) {
1208 if (clid != TC_H_ROOT) {
1209 if (clid != TC_H_INGRESS) {
1210 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1211 if (!p)
1212 return -ENOENT;
1213 q = qdisc_leaf(p, clid);
1214 } else if (dev_ingress_queue_create(dev)) {
1215 q = dev_ingress_queue(dev)->qdisc_sleeping;
1216 }
1217 } else {
1218 q = dev->qdisc;
1219 }
1220
 1221 /* It may be the default qdisc; ignore it */
1222 if (q && q->handle == 0)
1223 q = NULL;
1224
1225 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1226 if (tcm->tcm_handle) {
1227 if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1228 return -EEXIST;
1229 if (TC_H_MIN(tcm->tcm_handle))
1230 return -EINVAL;
1231 q = qdisc_lookup(dev, tcm->tcm_handle);
1232 if (!q)
1233 goto create_n_graft;
1234 if (n->nlmsg_flags & NLM_F_EXCL)
1235 return -EEXIST;
1236 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1237 return -EINVAL;
1238 if (q == p ||
1239 (p && check_loop(q, p, 0)))
1240 return -ELOOP;
1241 atomic_inc(&q->refcnt);
1242 goto graft;
1243 } else {
1244 if (!q)
1245 goto create_n_graft;
1246
 1247 /* This magic test requires explanation.
 1248 *
 1249 * We know that some child q is already
 1250 * attached to this parent and we have a choice:
 1251 * either to change it or to create/graft a new one.
 1252 *
 1253 * 1. We are allowed to create/graft only
 1254 * if CREATE and REPLACE flags are set.
 1255 *
 1256 * 2. If EXCL is set, the requestor wanted to say
 1257 * that the qdisc tcm_handle is not expected
 1258 * to exist, so we choose create/graft too.
 1259 *
 1260 * 3. The last case is when no flags are set.
 1261 * Alas, it is a sort of hole in the API; we
 1262 * cannot decide what to do unambiguously.
 1263 * For now we select create/graft if the
 1264 * user gave a KIND which does not match the existing one.
 1265 */
1266 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1267 (n->nlmsg_flags & NLM_F_REPLACE) &&
1268 ((n->nlmsg_flags & NLM_F_EXCL) ||
1269 (tca[TCA_KIND] &&
1270 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1271 goto create_n_graft;
1272 }
1273 }
1274 } else {
1275 if (!tcm->tcm_handle)
1276 return -EINVAL;
1277 q = qdisc_lookup(dev, tcm->tcm_handle);
1278 }
1279
1280 /* Change qdisc parameters */
1281 if (q == NULL)
1282 return -ENOENT;
1283 if (n->nlmsg_flags & NLM_F_EXCL)
1284 return -EEXIST;
1285 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1286 return -EINVAL;
1287 err = qdisc_change(q, tca);
1288 if (err == 0)
1289 qdisc_notify(net, skb, n, clid, NULL, q);
1290 return err;
1291
1292create_n_graft:
1293 if (!(n->nlmsg_flags & NLM_F_CREATE))
1294 return -ENOENT;
1295 if (clid == TC_H_INGRESS) {
1296 if (dev_ingress_queue(dev))
1297 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1298 tcm->tcm_parent, tcm->tcm_parent,
1299 tca, &err);
1300 else
1301 err = -ENOENT;
1302 } else {
1303 struct netdev_queue *dev_queue;
1304
1305 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1306 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1307 else if (p)
1308 dev_queue = p->dev_queue;
1309 else
1310 dev_queue = netdev_get_tx_queue(dev, 0);
1311
1312 q = qdisc_create(dev, dev_queue, p,
1313 tcm->tcm_parent, tcm->tcm_handle,
1314 tca, &err);
1315 }
1316 if (q == NULL) {
1317 if (err == -EAGAIN)
1318 goto replay;
1319 return err;
1320 }
1321
1322graft:
1323 err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1324 if (err) {
1325 if (q)
1326 qdisc_destroy(q);
1327 return err;
1328 }
1329
1330 return 0;
1331}
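
/*
 * How the netlink flags map to tc(8) commands, for orientation (the exact
 * flag combinations are chosen by iproute2 and may vary between versions):
 *
 *	tc qdisc add     -> NLM_F_CREATE | NLM_F_EXCL    (fails with EEXIST
 *	                    if a qdisc is already grafted at that point)
 *	tc qdisc replace -> NLM_F_CREATE | NLM_F_REPLACE (create or swap)
 *	tc qdisc change  -> no flags                     (modify in place,
 *	                    same kind required)
 *
 * When qdisc_create() has to drop the RTNL lock to load a scheduler module
 * it returns -EAGAIN, and the whole request is replayed from the "replay:"
 * label above.
 */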
1332
1333static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1334 u32 portid, u32 seq, u16 flags, int event)
1335{
1336 struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
1337 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
1338 struct tcmsg *tcm;
1339 struct nlmsghdr *nlh;
1340 unsigned char *b = skb_tail_pointer(skb);
1341 struct gnet_dump d;
1342 struct qdisc_size_table *stab;
1343 __u32 qlen;
1344
1345 cond_resched();
1346 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1347 if (!nlh)
1348 goto out_nlmsg_trim;
1349 tcm = nlmsg_data(nlh);
1350 tcm->tcm_family = AF_UNSPEC;
1351 tcm->tcm__pad1 = 0;
1352 tcm->tcm__pad2 = 0;
1353 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1354 tcm->tcm_parent = clid;
1355 tcm->tcm_handle = q->handle;
1356 tcm->tcm_info = atomic_read(&q->refcnt);
1357 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1358 goto nla_put_failure;
1359 if (q->ops->dump && q->ops->dump(q, skb) < 0)
1360 goto nla_put_failure;
1361 qlen = q->q.qlen;
1362
1363 stab = rtnl_dereference(q->stab);
1364 if (stab && qdisc_dump_stab(skb, stab) < 0)
1365 goto nla_put_failure;
1366
1367 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1368 qdisc_root_sleeping_lock(q), &d) < 0)
1369 goto nla_put_failure;
1370
1371 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1372 goto nla_put_failure;
1373
1374 if (qdisc_is_percpu_stats(q)) {
1375 cpu_bstats = q->cpu_bstats;
1376 cpu_qstats = q->cpu_qstats;
1377 }
1378
1379 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
1380 gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1381 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
1382 goto nla_put_failure;
1383
1384 if (gnet_stats_finish_copy(&d) < 0)
1385 goto nla_put_failure;
1386
1387 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1388 return skb->len;
1389
1390out_nlmsg_trim:
1391nla_put_failure:
1392 nlmsg_trim(skb, b);
1393 return -1;
1394}
1395
1396static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1397{
1398 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1399}
1400
1401static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1402 struct nlmsghdr *n, u32 clid,
1403 struct Qdisc *old, struct Qdisc *new)
1404{
1405 struct sk_buff *skb;
1406 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1407
1408 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1409 if (!skb)
1410 return -ENOBUFS;
1411
1412 if (old && !tc_qdisc_dump_ignore(old)) {
1413 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1414 0, RTM_DELQDISC) < 0)
1415 goto err_out;
1416 }
1417 if (new && !tc_qdisc_dump_ignore(new)) {
1418 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1419 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1420 goto err_out;
1421 }
1422
1423 if (skb->len)
1424 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1425 n->nlmsg_flags & NLM_F_ECHO);
1426
1427err_out:
1428 kfree_skb(skb);
1429 return -EINVAL;
1430}
1431
1432static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1433 struct netlink_callback *cb,
1434 int *q_idx_p, int s_q_idx)
1435{
1436 int ret = 0, q_idx = *q_idx_p;
1437 struct Qdisc *q;
1438
1439 if (!root)
1440 return 0;
1441
1442 q = root;
1443 if (q_idx < s_q_idx) {
1444 q_idx++;
1445 } else {
1446 if (!tc_qdisc_dump_ignore(q) &&
1447 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1448 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1449 goto done;
1450 q_idx++;
1451 }
1452 list_for_each_entry(q, &root->list, list) {
1453 if (q_idx < s_q_idx) {
1454 q_idx++;
1455 continue;
1456 }
1457 if (!tc_qdisc_dump_ignore(q) &&
1458 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1459 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1460 goto done;
1461 q_idx++;
1462 }
1463
1464out:
1465 *q_idx_p = q_idx;
1466 return ret;
1467done:
1468 ret = -1;
1469 goto out;
1470}
1471
1472static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1473{
1474 struct net *net = sock_net(skb->sk);
1475 int idx, q_idx;
1476 int s_idx, s_q_idx;
1477 struct net_device *dev;
1478
1479 s_idx = cb->args[0];
1480 s_q_idx = q_idx = cb->args[1];
1481
1482 idx = 0;
1483 ASSERT_RTNL();
1484 for_each_netdev(net, dev) {
1485 struct netdev_queue *dev_queue;
1486
1487 if (idx < s_idx)
1488 goto cont;
1489 if (idx > s_idx)
1490 s_q_idx = 0;
1491 q_idx = 0;
1492
1493 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1494 goto done;
1495
1496 dev_queue = dev_ingress_queue(dev);
1497 if (dev_queue &&
1498 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1499 &q_idx, s_q_idx) < 0)
1500 goto done;
1501
1502cont:
1503 idx++;
1504 }
1505
1506done:
1507 cb->args[0] = idx;
1508 cb->args[1] = q_idx;
1509
1510 return skb->len;
1511}
1512
1513
1514
1515/************************************************
1516 * Traffic classes manipulation. *
1517 ************************************************/
1518
1519
1520
1521static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1522{
1523 struct net *net = sock_net(skb->sk);
1524 struct tcmsg *tcm = nlmsg_data(n);
1525 struct nlattr *tca[TCA_MAX + 1];
1526 struct net_device *dev;
1527 struct Qdisc *q = NULL;
1528 const struct Qdisc_class_ops *cops;
1529 unsigned long cl = 0;
1530 unsigned long new_cl;
1531 u32 portid;
1532 u32 clid;
1533 u32 qid;
1534 int err;
1535
1536 if ((n->nlmsg_type != RTM_GETTCLASS) &&
1537 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1538 return -EPERM;
1539
1540 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1541 if (err < 0)
1542 return err;
1543
1544 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1545 if (!dev)
1546 return -ENODEV;
1547
1548 /*
1549 parent == TC_H_UNSPEC - unspecified parent.
1550 parent == TC_H_ROOT - class is root, which has no parent.
1551 parent == X:0 - parent is root class.
1552 parent == X:Y - parent is a node in hierarchy.
1553 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1554
1555 handle == 0:0 - generate handle from kernel pool.
1556 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1557 handle == X:Y - clear.
1558 handle == X:0 - root class.
1559 */
1560
1561 /* Step 1. Determine qdisc handle X:0 */
1562
1563 portid = tcm->tcm_parent;
1564 clid = tcm->tcm_handle;
1565 qid = TC_H_MAJ(clid);
1566
1567 if (portid != TC_H_ROOT) {
1568 u32 qid1 = TC_H_MAJ(portid);
1569
1570 if (qid && qid1) {
1571 /* If both majors are known, they must be identical. */
1572 if (qid != qid1)
1573 return -EINVAL;
1574 } else if (qid1) {
1575 qid = qid1;
1576 } else if (qid == 0)
1577 qid = dev->qdisc->handle;
1578
 1579 /* Now qid is a genuine qdisc handle consistent
 1580 * with both parent and child.
1581 *
1582 * TC_H_MAJ(portid) still may be unspecified, complete it now.
1583 */
1584 if (portid)
1585 portid = TC_H_MAKE(qid, portid);
1586 } else {
1587 if (qid == 0)
1588 qid = dev->qdisc->handle;
1589 }
1590
1591 /* OK. Locate qdisc */
1592 q = qdisc_lookup(dev, qid);
1593 if (!q)
1594 return -ENOENT;
1595
 1596 /* And check that it supports classes */
1597 cops = q->ops->cl_ops;
1598 if (cops == NULL)
1599 return -EINVAL;
1600
1601 /* Now try to get class */
1602 if (clid == 0) {
1603 if (portid == TC_H_ROOT)
1604 clid = qid;
1605 } else
1606 clid = TC_H_MAKE(qid, clid);
1607
1608 if (clid)
1609 cl = cops->get(q, clid);
1610
1611 if (cl == 0) {
1612 err = -ENOENT;
1613 if (n->nlmsg_type != RTM_NEWTCLASS ||
1614 !(n->nlmsg_flags & NLM_F_CREATE))
1615 goto out;
1616 } else {
1617 switch (n->nlmsg_type) {
1618 case RTM_NEWTCLASS:
1619 err = -EEXIST;
1620 if (n->nlmsg_flags & NLM_F_EXCL)
1621 goto out;
1622 break;
1623 case RTM_DELTCLASS:
1624 err = -EOPNOTSUPP;
1625 if (cops->delete)
1626 err = cops->delete(q, cl);
1627 if (err == 0)
1628 tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1629 goto out;
1630 case RTM_GETTCLASS:
1631 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1632 goto out;
1633 default:
1634 err = -EINVAL;
1635 goto out;
1636 }
1637 }
1638
1639 new_cl = cl;
1640 err = -EOPNOTSUPP;
1641 if (cops->change)
1642 err = cops->change(q, clid, portid, tca, &new_cl);
1643 if (err == 0)
1644 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1645
1646out:
1647 if (cl)
1648 cops->put(q, cl);
1649
1650 return err;
1651}
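
/*
 * Worked example of the handle fixups above (illustrative command):
 *
 *	tc class add dev eth0 parent 1: classid 1:10 ...
 *
 * arrives with tcm_parent = 0x00010000 (parent 1:0) and
 * tcm_handle = 0x0001000a (classid 1:10).  Step 1 takes
 * qid = TC_H_MAJ(tcm_handle) = 0x00010000, checks it against
 * TC_H_MAJ(tcm_parent), and completes portid to TC_H_MAKE(qid, portid),
 * so both parent and class end up referring to qdisc 1: before the class
 * operation is handed to the qdisc's ->change()/->delete().
 */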
1652
1653
1654static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1655 unsigned long cl,
1656 u32 portid, u32 seq, u16 flags, int event)
1657{
1658 struct tcmsg *tcm;
1659 struct nlmsghdr *nlh;
1660 unsigned char *b = skb_tail_pointer(skb);
1661 struct gnet_dump d;
1662 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1663
1664 cond_resched();
1665 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1666 if (!nlh)
1667 goto out_nlmsg_trim;
1668 tcm = nlmsg_data(nlh);
1669 tcm->tcm_family = AF_UNSPEC;
1670 tcm->tcm__pad1 = 0;
1671 tcm->tcm__pad2 = 0;
1672 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1673 tcm->tcm_parent = q->handle;
1674 tcm->tcm_handle = q->handle;
1675 tcm->tcm_info = 0;
1676 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1677 goto nla_put_failure;
1678 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1679 goto nla_put_failure;
1680
1681 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1682 qdisc_root_sleeping_lock(q), &d) < 0)
1683 goto nla_put_failure;
1684
1685 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1686 goto nla_put_failure;
1687
1688 if (gnet_stats_finish_copy(&d) < 0)
1689 goto nla_put_failure;
1690
1691 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1692 return skb->len;
1693
1694out_nlmsg_trim:
1695nla_put_failure:
1696 nlmsg_trim(skb, b);
1697 return -1;
1698}
1699
1700static int tclass_notify(struct net *net, struct sk_buff *oskb,
1701 struct nlmsghdr *n, struct Qdisc *q,
1702 unsigned long cl, int event)
1703{
1704 struct sk_buff *skb;
1705 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1706
1707 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1708 if (!skb)
1709 return -ENOBUFS;
1710
1711 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1712 kfree_skb(skb);
1713 return -EINVAL;
1714 }
1715
1716 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1717 n->nlmsg_flags & NLM_F_ECHO);
1718}
1719
1720struct qdisc_dump_args {
1721 struct qdisc_walker w;
1722 struct sk_buff *skb;
1723 struct netlink_callback *cb;
1724};
1725
1726static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1727{
1728 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1729
1730 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1731 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1732}
1733
1734static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1735 struct tcmsg *tcm, struct netlink_callback *cb,
1736 int *t_p, int s_t)
1737{
1738 struct qdisc_dump_args arg;
1739
1740 if (tc_qdisc_dump_ignore(q) ||
1741 *t_p < s_t || !q->ops->cl_ops ||
1742 (tcm->tcm_parent &&
1743 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1744 (*t_p)++;
1745 return 0;
1746 }
1747 if (*t_p > s_t)
1748 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1749 arg.w.fn = qdisc_class_dump;
1750 arg.skb = skb;
1751 arg.cb = cb;
1752 arg.w.stop = 0;
1753 arg.w.skip = cb->args[1];
1754 arg.w.count = 0;
1755 q->ops->cl_ops->walk(q, &arg.w);
1756 cb->args[1] = arg.w.count;
1757 if (arg.w.stop)
1758 return -1;
1759 (*t_p)++;
1760 return 0;
1761}
1762
1763static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1764 struct tcmsg *tcm, struct netlink_callback *cb,
1765 int *t_p, int s_t)
1766{
1767 struct Qdisc *q;
1768
1769 if (!root)
1770 return 0;
1771
1772 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1773 return -1;
1774
1775 list_for_each_entry(q, &root->list, list) {
1776 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1777 return -1;
1778 }
1779
1780 return 0;
1781}
1782
1783static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1784{
1785 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1786 struct net *net = sock_net(skb->sk);
1787 struct netdev_queue *dev_queue;
1788 struct net_device *dev;
1789 int t, s_t;
1790
1791 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1792 return 0;
1793 dev = dev_get_by_index(net, tcm->tcm_ifindex);
1794 if (!dev)
1795 return 0;
1796
1797 s_t = cb->args[0];
1798 t = 0;
1799
1800 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1801 goto done;
1802
1803 dev_queue = dev_ingress_queue(dev);
1804 if (dev_queue &&
1805 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1806 &t, s_t) < 0)
1807 goto done;
1808
1809done:
1810 cb->args[0] = t;
1811
1812 dev_put(dev);
1813 return skb->len;
1814}
1815
1816/* Main classifier routine: scans classifier chain attached
1817 * to this qdisc, (optionally) tests for protocol and asks
1818 * specific classifiers.
1819 */
1820int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1821 struct tcf_result *res, bool compat_mode)
1822{
1823 __be16 protocol = tc_skb_protocol(skb);
1824#ifdef CONFIG_NET_CLS_ACT
1825 const struct tcf_proto *old_tp = tp;
1826 int limit = 0;
1827
1828reclassify:
1829#endif
1830 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1831 int err;
1832
1833 if (tp->protocol != protocol &&
1834 tp->protocol != htons(ETH_P_ALL))
1835 continue;
1836
1837 err = tp->classify(skb, tp, res);
1838#ifdef CONFIG_NET_CLS_ACT
1839 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
1840 goto reset;
1841#endif
1842 if (err >= 0)
1843 return err;
1844 }
1845
1846 return TC_ACT_UNSPEC; /* signal: continue lookup */
1847#ifdef CONFIG_NET_CLS_ACT
1848reset:
1849 if (unlikely(limit++ >= MAX_REC_LOOP)) {
1850 net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
1851 tp->q->ops->id, tp->prio & 0xffff,
1852 ntohs(tp->protocol));
1853 return TC_ACT_SHOT;
1854 }
1855
1856 tp = old_tp;
1857 protocol = tc_skb_protocol(skb);
1858 goto reclassify;
1859#endif
1860}
1861EXPORT_SYMBOL(tc_classify);
1862
1863bool tcf_destroy(struct tcf_proto *tp, bool force)
1864{
1865 if (tp->ops->destroy(tp, force)) {
1866 module_put(tp->ops->owner);
1867 kfree_rcu(tp, rcu);
1868 return true;
1869 }
1870
1871 return false;
1872}
1873
1874void tcf_destroy_chain(struct tcf_proto __rcu **fl)
1875{
1876 struct tcf_proto *tp;
1877
1878 while ((tp = rtnl_dereference(*fl)) != NULL) {
1879 RCU_INIT_POINTER(*fl, tp->next);
1880 tcf_destroy(tp, true);
1881 }
1882}
1883EXPORT_SYMBOL(tcf_destroy_chain);
1884
1885#ifdef CONFIG_PROC_FS
1886static int psched_show(struct seq_file *seq, void *v)
1887{
1888 seq_printf(seq, "%08x %08x %08x %08x\n",
1889 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1890 1000000,
1891 (u32)NSEC_PER_SEC / hrtimer_resolution);
1892
1893 return 0;
1894}
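
/*
 * Example (typical output on a system with high-resolution timers; the
 * exact values depend on PSCHED_SHIFT and the clocksource):
 *
 *	$ cat /proc/net/psched
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. 1000 ns per microsecond, 64 ns per psched tick (PSCHED_SHIFT == 6),
 * the hardcoded 1000000, and a 1 GHz effective timer resolution.  tc from
 * iproute2 reads these values at startup to convert between its own time
 * units and kernel psched ticks.
 */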
1895
1896static int psched_open(struct inode *inode, struct file *file)
1897{
1898 return single_open(file, psched_show, NULL);
1899}
1900
1901static const struct file_operations psched_fops = {
1902 .owner = THIS_MODULE,
1903 .open = psched_open,
1904 .read = seq_read,
1905 .llseek = seq_lseek,
1906 .release = single_release,
1907};
1908
1909static int __net_init psched_net_init(struct net *net)
1910{
1911 struct proc_dir_entry *e;
1912
1913 e = proc_create("psched", 0, net->proc_net, &psched_fops);
1914 if (e == NULL)
1915 return -ENOMEM;
1916
1917 return 0;
1918}
1919
1920static void __net_exit psched_net_exit(struct net *net)
1921{
1922 remove_proc_entry("psched", net->proc_net);
1923}
1924#else
1925static int __net_init psched_net_init(struct net *net)
1926{
1927 return 0;
1928}
1929
1930static void __net_exit psched_net_exit(struct net *net)
1931{
1932}
1933#endif
1934
1935static struct pernet_operations psched_net_ops = {
1936 .init = psched_net_init,
1937 .exit = psched_net_exit,
1938};
1939
1940static int __init pktsched_init(void)
1941{
1942 int err;
1943
1944 err = register_pernet_subsys(&psched_net_ops);
1945 if (err) {
1946 pr_err("pktsched_init: "
1947 "cannot initialize per netns operations\n");
1948 return err;
1949 }
1950
1951 register_qdisc(&pfifo_fast_ops);
1952 register_qdisc(&pfifo_qdisc_ops);
1953 register_qdisc(&bfifo_qdisc_ops);
1954 register_qdisc(&pfifo_head_drop_qdisc_ops);
1955 register_qdisc(&mq_qdisc_ops);
1956 register_qdisc(&noqueue_qdisc_ops);
1957
1958 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1959 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1960 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1961 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1962 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1963 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1964
1965 return 0;
1966}
1967
1968subsys_initcall(pktsched_init);
1/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/skbuff.h>
24#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
29#include <linux/hrtimer.h>
30#include <linux/lockdep.h>
31#include <linux/slab.h>
32
33#include <net/net_namespace.h>
34#include <net/sock.h>
35#include <net/netlink.h>
36#include <net/pkt_sched.h>
37
38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
40 struct Qdisc *old, struct Qdisc *new);
41static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
44
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
55 Generally, queueing discipline ("qdisc") is a black box,
56 which is able to enqueue packets and to dequeue them (when
57 device is ready to send something) in order and at times
58 determined by algorithm hidden in it.
59
60 qdisc's are divided to two categories:
61 - "queues", which have no internal structure visible from outside.
62 - "schedulers", which split all the packets to "traffic classes",
63 using "packet classifiers" (look at cls_api.c)
64
65 In turn, classes may have child qdiscs (as rule, queues)
66 attached to them etc. etc. etc.
67
68 The goal of the routines in this file is to translate
69 information supplied by user in the form of handles
70 to more intelligible for kernel form, to make some sanity
71 checks and part of work, which is common to all qdiscs
72 and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
82 dequeue usually returns a skb to send. It is allowed to return NULL,
83 but it does not mean that queue is empty, it just means that
84 discipline does not want to send anything this time.
85 Queue is really empty if q->q.qlen == 0.
86 For complicated disciplines with multiple queues q->q is not
87 real packet queue, but however q->q.qlen must be valid.
88
89 ---enqueue
90
91 enqueue returns 0, if packet was enqueued successfully.
92 If packet (this one or another one) was dropped, it returns
93 not zero error code.
94 NET_XMIT_DROP - this packet dropped
95 Expected action: do not backoff, but wait until queue will clear.
96 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
97 Expected action: backoff or ignore
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: backoff or error to real-time apps.
100
101 Auxiliary routines:
102
103 ---peek
104
105 like dequeue but without removing a packet from the queue
106
107 ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
138/* Register/unregister queueing discipline */
139
140int register_qdisc(struct Qdisc_ops *qops)
141{
142 struct Qdisc_ops *q, **qp;
143 int rc = -EEXIST;
144
145 write_lock(&qdisc_mod_lock);
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
147 if (!strcmp(qops->id, q->id))
148 goto out;
149
150 if (qops->enqueue == NULL)
151 qops->enqueue = noop_qdisc_ops.enqueue;
152 if (qops->peek == NULL) {
153 if (qops->dequeue == NULL)
154 qops->peek = noop_qdisc_ops.peek;
155 else
156 goto out_einval;
157 }
158 if (qops->dequeue == NULL)
159 qops->dequeue = noop_qdisc_ops.dequeue;
160
161 if (qops->cl_ops) {
162 const struct Qdisc_class_ops *cops = qops->cl_ops;
163
164 if (!(cops->get && cops->put && cops->walk && cops->leaf))
165 goto out_einval;
166
167 if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 goto out_einval;
169 }
170
171 qops->next = NULL;
172 *qp = qops;
173 rc = 0;
174out:
175 write_unlock(&qdisc_mod_lock);
176 return rc;
177
178out_einval:
179 rc = -EINVAL;
180 goto out;
181}
182EXPORT_SYMBOL(register_qdisc);
183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
201EXPORT_SYMBOL(unregister_qdisc);
202
203/* Get default qdisc if not otherwise specified */
204void qdisc_get_default(char *name, size_t len)
205{
206 read_lock(&qdisc_mod_lock);
207 strlcpy(name, default_qdisc_ops->id, len);
208 read_unlock(&qdisc_mod_lock);
209}
210
211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212{
213 struct Qdisc_ops *q = NULL;
214
215 for (q = qdisc_base; q; q = q->next) {
216 if (!strcmp(name, q->id)) {
217 if (!try_module_get(q->owner))
218 q = NULL;
219 break;
220 }
221 }
222
223 return q;
224}
225
226/* Set new default qdisc to use */
227int qdisc_set_default(const char *name)
228{
229 const struct Qdisc_ops *ops;
230
231 if (!capable(CAP_NET_ADMIN))
232 return -EPERM;
233
234 write_lock(&qdisc_mod_lock);
235 ops = qdisc_lookup_default(name);
236 if (!ops) {
237 /* Not found, drop lock and try to load module */
238 write_unlock(&qdisc_mod_lock);
239 request_module("sch_%s", name);
240 write_lock(&qdisc_mod_lock);
241
242 ops = qdisc_lookup_default(name);
243 }
244
245 if (ops) {
246 /* Set new default */
247 module_put(default_qdisc_ops->owner);
248 default_qdisc_ops = ops;
249 }
250 write_unlock(&qdisc_mod_lock);
251
252 return ops ? 0 : -ENOENT;
253}
254
255/* The handle is known. Find the qdisc among all qdiscs attached to the device
256 (the root qdisc, all its children, children of children, etc.)
257 */
258
259static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
260{
261 struct Qdisc *q;
262
263 if (!(root->flags & TCQ_F_BUILTIN) &&
264 root->handle == handle)
265 return root;
266
267 list_for_each_entry(q, &root->list, list) {
268 if (q->handle == handle)
269 return q;
270 }
271 return NULL;
272}
273
274void qdisc_list_add(struct Qdisc *q)
275{
276 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
277 struct Qdisc *root = qdisc_dev(q)->qdisc;
278
279 WARN_ON_ONCE(root == &noop_qdisc);
280 list_add_tail(&q->list, &root->list);
281 }
282}
283EXPORT_SYMBOL(qdisc_list_add);
284
285void qdisc_list_del(struct Qdisc *q)
286{
287 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
288 list_del(&q->list);
289}
290EXPORT_SYMBOL(qdisc_list_del);
291
292struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
293{
294 struct Qdisc *q;
295
296 q = qdisc_match_from_root(dev->qdisc, handle);
297 if (q)
298 goto out;
299
300 if (dev_ingress_queue(dev))
301 q = qdisc_match_from_root(
302 dev_ingress_queue(dev)->qdisc_sleeping,
303 handle);
304out:
305 return q;
306}
307
308static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
309{
310 unsigned long cl;
311 struct Qdisc *leaf;
312 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
313
314 if (cops == NULL)
315 return NULL;
316 cl = cops->get(p, classid);
317
318 if (cl == 0)
319 return NULL;
320 leaf = cops->leaf(p, cl);
321 cops->put(p, cl);
322 return leaf;
323}
324
325/* Find queueing discipline by name */
326
327static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
328{
329 struct Qdisc_ops *q = NULL;
330
331 if (kind) {
332 read_lock(&qdisc_mod_lock);
333 for (q = qdisc_base; q; q = q->next) {
334 if (nla_strcmp(kind, q->id) == 0) {
335 if (!try_module_get(q->owner))
336 q = NULL;
337 break;
338 }
339 }
340 read_unlock(&qdisc_mod_lock);
341 }
342 return q;
343}
344
345/* The linklayer setting was not transferred from older iproute2
346 * versions, and the rate table lookup system has been dropped from
347 * the kernel. To stay backward compatible with older iproute2 tc
348 * utilities, we detect the linklayer setting by checking whether the
349 * rate table has been modified.
350 *
351 * For linklayer ATM, the rate table entries are aligned to 48-byte
352 * cells, so some table entries will contain the same value. The
353 * mpu (min packet unit) is also encoded into the old rate table, so
354 * starting from the mpu we find the low and high table entries
355 * mapping this cell. If these entries contain the same value, then
356 * the rate table has been modified for linklayer ATM.
357 *
358 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
359 * then rounding up to the next cell, calculating the table entry one
360 * below, and comparing.
361 */
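/*
 * Worked example (hypothetical numbers): with mpu = 96 and cell_log = 3,
 * low = roundup(96, 48) = 96 and high = roundup(97, 48) = 144, giving
 * cell_low = 96 >> 3 = 12 and cell_high = (144 >> 3) - 1 = 17.  If
 * rtab[12] == rtab[17], the table was built on a 48-byte (ATM) cell
 * grid and TC_LINKLAYER_ATM is reported; otherwise Ethernet is assumed.
 */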
362static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
363{
364 int low = roundup(r->mpu, 48);
365 int high = roundup(low+1, 48);
366 int cell_low = low >> r->cell_log;
367 int cell_high = (high >> r->cell_log) - 1;
368
369 /* rtab is too inaccurate at rates > 100Mbit/s */
370 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
371 pr_debug("TC linklayer: Giving up ATM detection\n");
372 return TC_LINKLAYER_ETHERNET;
373 }
374
375 if ((cell_high > cell_low) && (cell_high < 256)
376 && (rtab[cell_low] == rtab[cell_high])) {
377 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
378 cell_low, cell_high, rtab[cell_high]);
379 return TC_LINKLAYER_ATM;
380 }
381 return TC_LINKLAYER_ETHERNET;
382}
383
384static struct qdisc_rate_table *qdisc_rtab_list;
385
386struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
387{
388 struct qdisc_rate_table *rtab;
389
390 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
391 nla_len(tab) != TC_RTAB_SIZE)
392 return NULL;
393
394 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
395 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
396 !memcmp(&rtab->data, nla_data(tab), 1024)) {
397 rtab->refcnt++;
398 return rtab;
399 }
400 }
401
402 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
403 if (rtab) {
404 rtab->rate = *r;
405 rtab->refcnt = 1;
406 memcpy(rtab->data, nla_data(tab), 1024);
407 if (r->linklayer == TC_LINKLAYER_UNAWARE)
408 r->linklayer = __detect_linklayer(r, rtab->data);
409 rtab->next = qdisc_rtab_list;
410 qdisc_rtab_list = rtab;
411 }
412 return rtab;
413}
414EXPORT_SYMBOL(qdisc_get_rtab);
415
416void qdisc_put_rtab(struct qdisc_rate_table *tab)
417{
418 struct qdisc_rate_table *rtab, **rtabp;
419
420 if (!tab || --tab->refcnt)
421 return;
422
423 for (rtabp = &qdisc_rtab_list;
424 (rtab = *rtabp) != NULL;
425 rtabp = &rtab->next) {
426 if (rtab == tab) {
427 *rtabp = rtab->next;
428 kfree(rtab);
429 return;
430 }
431 }
432}
433EXPORT_SYMBOL(qdisc_put_rtab);
434
435static LIST_HEAD(qdisc_stab_list);
436static DEFINE_SPINLOCK(qdisc_stab_lock);
437
438static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
439 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
440 [TCA_STAB_DATA] = { .type = NLA_BINARY },
441};
442
443static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
444{
445 struct nlattr *tb[TCA_STAB_MAX + 1];
446 struct qdisc_size_table *stab;
447 struct tc_sizespec *s;
448 unsigned int tsize = 0;
449 u16 *tab = NULL;
450 int err;
451
452 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
453 if (err < 0)
454 return ERR_PTR(err);
455 if (!tb[TCA_STAB_BASE])
456 return ERR_PTR(-EINVAL);
457
458 s = nla_data(tb[TCA_STAB_BASE]);
459
460 if (s->tsize > 0) {
461 if (!tb[TCA_STAB_DATA])
462 return ERR_PTR(-EINVAL);
463 tab = nla_data(tb[TCA_STAB_DATA]);
464 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
465 }
466
467 if (tsize != s->tsize || (!tab && tsize > 0))
468 return ERR_PTR(-EINVAL);
469
470 spin_lock(&qdisc_stab_lock);
471
472 list_for_each_entry(stab, &qdisc_stab_list, list) {
473 if (memcmp(&stab->szopts, s, sizeof(*s)))
474 continue;
475 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
476 continue;
477 stab->refcnt++;
478 spin_unlock(&qdisc_stab_lock);
479 return stab;
480 }
481
482 spin_unlock(&qdisc_stab_lock);
483
484 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
485 if (!stab)
486 return ERR_PTR(-ENOMEM);
487
488 stab->refcnt = 1;
489 stab->szopts = *s;
490 if (tsize > 0)
491 memcpy(stab->data, tab, tsize * sizeof(u16));
492
493 spin_lock(&qdisc_stab_lock);
494 list_add_tail(&stab->list, &qdisc_stab_list);
495 spin_unlock(&qdisc_stab_lock);
496
497 return stab;
498}
499
500static void stab_kfree_rcu(struct rcu_head *head)
501{
502 kfree(container_of(head, struct qdisc_size_table, rcu));
503}
504
505void qdisc_put_stab(struct qdisc_size_table *tab)
506{
507 if (!tab)
508 return;
509
510 spin_lock(&qdisc_stab_lock);
511
512 if (--tab->refcnt == 0) {
513 list_del(&tab->list);
514 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
515 }
516
517 spin_unlock(&qdisc_stab_lock);
518}
519EXPORT_SYMBOL(qdisc_put_stab);
520
521static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
522{
523 struct nlattr *nest;
524
525 nest = nla_nest_start(skb, TCA_STAB);
526 if (nest == NULL)
527 goto nla_put_failure;
528 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
529 goto nla_put_failure;
530 nla_nest_end(skb, nest);
531
532 return skb->len;
533
534nla_put_failure:
535 return -1;
536}
537
538void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
539{
540 int pkt_len, slot;
541
542 pkt_len = skb->len + stab->szopts.overhead;
543 if (unlikely(!stab->szopts.tsize))
544 goto out;
545
546 slot = pkt_len + stab->szopts.cell_align;
547 if (unlikely(slot < 0))
548 slot = 0;
549
550 slot >>= stab->szopts.cell_log;
551 if (likely(slot < stab->szopts.tsize))
552 pkt_len = stab->data[slot];
553 else
554 pkt_len = stab->data[stab->szopts.tsize - 1] *
555 (slot / stab->szopts.tsize) +
556 stab->data[slot % stab->szopts.tsize];
557
558 pkt_len <<= stab->szopts.size_log;
559out:
560 if (unlikely(pkt_len < 1))
561 pkt_len = 1;
562 qdisc_skb_cb(skb)->pkt_len = pkt_len;
563}
564EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
565
566void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
567{
568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
570 txt, qdisc->ops->id, qdisc->handle >> 16);
571 qdisc->flags |= TCQ_F_WARN_NONWC;
572 }
573}
574EXPORT_SYMBOL(qdisc_warn_nonwc);
575
576static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
577{
578 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
579 timer);
580
581 qdisc_unthrottled(wd->qdisc);
582 __netif_schedule(qdisc_root(wd->qdisc));
583
584 return HRTIMER_NORESTART;
585}
586
587void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
588{
589 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
590 wd->timer.function = qdisc_watchdog;
591 wd->qdisc = qdisc;
592}
593EXPORT_SYMBOL(qdisc_watchdog_init);
594
595void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
596{
597 if (test_bit(__QDISC_STATE_DEACTIVATED,
598 &qdisc_root_sleeping(wd->qdisc)->state))
599 return;
600
601 qdisc_throttled(wd->qdisc);
602
603 hrtimer_start(&wd->timer,
604 ns_to_ktime(expires),
605 HRTIMER_MODE_ABS);
606}
607EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
608
609void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
610{
611 hrtimer_cancel(&wd->timer);
612 qdisc_unthrottled(wd->qdisc);
613}
614EXPORT_SYMBOL(qdisc_watchdog_cancel);
615
616static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
617{
618 unsigned int size = n * sizeof(struct hlist_head), i;
619 struct hlist_head *h;
620
621 if (size <= PAGE_SIZE)
622 h = kmalloc(size, GFP_KERNEL);
623 else
624 h = (struct hlist_head *)
625 __get_free_pages(GFP_KERNEL, get_order(size));
626
627 if (h != NULL) {
628 for (i = 0; i < n; i++)
629 INIT_HLIST_HEAD(&h[i]);
630 }
631 return h;
632}
633
634static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
635{
636 unsigned int size = n * sizeof(struct hlist_head);
637
638 if (size <= PAGE_SIZE)
639 kfree(h);
640 else
641 free_pages((unsigned long)h, get_order(size));
642}
643
644void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
645{
646 struct Qdisc_class_common *cl;
647 struct hlist_node *next;
648 struct hlist_head *nhash, *ohash;
649 unsigned int nsize, nmask, osize;
650 unsigned int i, h;
651
652 /* Rehash when load factor exceeds 0.75 */
653 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
654 return;
655 nsize = clhash->hashsize * 2;
656 nmask = nsize - 1;
657 nhash = qdisc_class_hash_alloc(nsize);
658 if (nhash == NULL)
659 return;
660
661 ohash = clhash->hash;
662 osize = clhash->hashsize;
663
664 sch_tree_lock(sch);
665 for (i = 0; i < osize; i++) {
666 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
667 h = qdisc_class_hash(cl->classid, nmask);
668 hlist_add_head(&cl->hnode, &nhash[h]);
669 }
670 }
671 clhash->hash = nhash;
672 clhash->hashsize = nsize;
673 clhash->hashmask = nmask;
674 sch_tree_unlock(sch);
675
676 qdisc_class_hash_free(ohash, osize);
677}
678EXPORT_SYMBOL(qdisc_class_hash_grow);
679
680int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
681{
682 unsigned int size = 4;
683
684 clhash->hash = qdisc_class_hash_alloc(size);
685 if (clhash->hash == NULL)
686 return -ENOMEM;
687 clhash->hashsize = size;
688 clhash->hashmask = size - 1;
689 clhash->hashelems = 0;
690 return 0;
691}
692EXPORT_SYMBOL(qdisc_class_hash_init);
693
694void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
695{
696 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
697}
698EXPORT_SYMBOL(qdisc_class_hash_destroy);
699
700void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
701 struct Qdisc_class_common *cl)
702{
703 unsigned int h;
704
705 INIT_HLIST_NODE(&cl->hnode);
706 h = qdisc_class_hash(cl->classid, clhash->hashmask);
707 hlist_add_head(&cl->hnode, &clhash->hash[h]);
708 clhash->hashelems++;
709}
710EXPORT_SYMBOL(qdisc_class_hash_insert);
711
712void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
713 struct Qdisc_class_common *cl)
714{
715 hlist_del(&cl->hnode);
716 clhash->hashelems--;
717}
718EXPORT_SYMBOL(qdisc_class_hash_remove);
719
720/* Allocate a unique handle from the space managed by the kernel.
721 * Possible range is [8000-FFFF]:0000 (0x8000 values).
722 */
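/*
 * For illustration: a handle packs major:minor as (major << 16) | minor,
 * so the first automatically allocated handle below is 0x80010000,
 * which tc displays as "8001:".
 */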
723static u32 qdisc_alloc_handle(struct net_device *dev)
724{
725 int i = 0x8000;
726 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
727
728 do {
729 autohandle += TC_H_MAKE(0x10000U, 0);
730 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
731 autohandle = TC_H_MAKE(0x80000000U, 0);
732 if (!qdisc_lookup(dev, autohandle))
733 return autohandle;
734 cond_resched();
735 } while (--i > 0);
736
737 return 0;
738}
739
740void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
741{
742 const struct Qdisc_class_ops *cops;
743 unsigned long cl;
744 u32 parentid;
745 int drops;
746
747 if (n == 0)
748 return;
749 drops = max_t(int, n, 0);
750 while ((parentid = sch->parent)) {
751 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
752 return;
753
754 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
755 if (sch == NULL) {
756 WARN_ON(parentid != TC_H_ROOT);
757 return;
758 }
759 cops = sch->ops->cl_ops;
760 if (cops->qlen_notify) {
761 cl = cops->get(sch, parentid);
762 cops->qlen_notify(sch, cl);
763 cops->put(sch, cl);
764 }
765 sch->q.qlen -= n;
766 sch->qstats.drops += drops;
767 }
768}
769EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
770
771static void notify_and_destroy(struct net *net, struct sk_buff *skb,
772 struct nlmsghdr *n, u32 clid,
773 struct Qdisc *old, struct Qdisc *new)
774{
775 if (new || old)
776 qdisc_notify(net, skb, n, clid, old, new);
777
778 if (old)
779 qdisc_destroy(old);
780}
781
782/* Graft qdisc "new" to class "classid" of qdisc "parent" or
783 * to device "dev".
784 *
785 * When appropriate, send a netlink notification using "skb"
786 * and "n".
787 *
788 * On success, destroy the old qdisc.
789 */
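/*
 * For example, within this file RTM_DELQDISC reaches here via
 * qdisc_graft(dev, p, skb, n, clid, NULL, q): grafting a NULL "new"
 * qdisc detaches and destroys the old one, while tc_modify_qdisc()
 * grafts a freshly created qdisc with "old" == NULL.
 */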
790
791static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
792 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
793 struct Qdisc *new, struct Qdisc *old)
794{
795 struct Qdisc *q = old;
796 struct net *net = dev_net(dev);
797 int err = 0;
798
799 if (parent == NULL) {
800 unsigned int i, num_q, ingress;
801
802 ingress = 0;
803 num_q = dev->num_tx_queues;
804 if ((q && q->flags & TCQ_F_INGRESS) ||
805 (new && new->flags & TCQ_F_INGRESS)) {
806 num_q = 1;
807 ingress = 1;
808 if (!dev_ingress_queue(dev))
809 return -ENOENT;
810 }
811
812 if (dev->flags & IFF_UP)
813 dev_deactivate(dev);
814
815 if (new && new->ops->attach) {
816 new->ops->attach(new);
817 num_q = 0;
818 }
819
820 for (i = 0; i < num_q; i++) {
821 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
822
823 if (!ingress)
824 dev_queue = netdev_get_tx_queue(dev, i);
825
826 old = dev_graft_qdisc(dev_queue, new);
827 if (new && i > 0)
828 atomic_inc(&new->refcnt);
829
830 if (!ingress)
831 qdisc_destroy(old);
832 }
833
834 if (!ingress) {
835 notify_and_destroy(net, skb, n, classid,
836 dev->qdisc, new);
837 if (new && !new->ops->attach)
838 atomic_inc(&new->refcnt);
839 dev->qdisc = new ? : &noop_qdisc;
840 } else {
841 notify_and_destroy(net, skb, n, classid, old, new);
842 }
843
844 if (dev->flags & IFF_UP)
845 dev_activate(dev);
846 } else {
847 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
848
849 err = -EOPNOTSUPP;
850 if (cops && cops->graft) {
851 unsigned long cl = cops->get(parent, classid);
852 if (cl) {
853 err = cops->graft(parent, cl, new, &old);
854 cops->put(parent, cl);
855 } else
856 err = -ENOENT;
857 }
858 if (!err)
859 notify_and_destroy(net, skb, n, classid, old, new);
860 }
861 return err;
862}
863
864/* lockdep annotation is needed for ingress; egress gets it only for name */
865static struct lock_class_key qdisc_tx_lock;
866static struct lock_class_key qdisc_rx_lock;
867
868/*
869 Allocate and initialize a new qdisc.
870
871 Parameters are passed via the tca attribute array.
872 */
873
874static struct Qdisc *
875qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
876 struct Qdisc *p, u32 parent, u32 handle,
877 struct nlattr **tca, int *errp)
878{
879 int err;
880 struct nlattr *kind = tca[TCA_KIND];
881 struct Qdisc *sch;
882 struct Qdisc_ops *ops;
883 struct qdisc_size_table *stab;
884
885 ops = qdisc_lookup_ops(kind);
886#ifdef CONFIG_MODULES
887 if (ops == NULL && kind != NULL) {
888 char name[IFNAMSIZ];
889 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
890 /* We dropped the RTNL semaphore in order to
891 * perform the module load. So, even if we
892 * succeeded in loading the module we have to
893 * tell the caller to replay the request. We
894 * indicate this using -EAGAIN.
895 * We replay the request because the device may
896 * go away in the mean time.
897 */
898 rtnl_unlock();
899 request_module("sch_%s", name);
900 rtnl_lock();
901 ops = qdisc_lookup_ops(kind);
902 if (ops != NULL) {
903 /* We will try qdisc_lookup_ops again on replay,
904 * so don't keep a reference.
905 */
906 module_put(ops->owner);
907 err = -EAGAIN;
908 goto err_out;
909 }
910 }
911 }
912#endif
913
914 err = -ENOENT;
915 if (ops == NULL)
916 goto err_out;
917
918 sch = qdisc_alloc(dev_queue, ops);
919 if (IS_ERR(sch)) {
920 err = PTR_ERR(sch);
921 goto err_out2;
922 }
923
924 sch->parent = parent;
925
926 if (handle == TC_H_INGRESS) {
927 sch->flags |= TCQ_F_INGRESS;
928 handle = TC_H_MAKE(TC_H_INGRESS, 0);
929 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
930 } else {
931 if (handle == 0) {
932 handle = qdisc_alloc_handle(dev);
933 err = -ENOMEM;
934 if (handle == 0)
935 goto err_out3;
936 }
937 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
938 if (!netif_is_multiqueue(dev))
939 sch->flags |= TCQ_F_ONETXQUEUE;
940 }
941
942 sch->handle = handle;
943
944 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
945 if (tca[TCA_STAB]) {
946 stab = qdisc_get_stab(tca[TCA_STAB]);
947 if (IS_ERR(stab)) {
948 err = PTR_ERR(stab);
949 goto err_out4;
950 }
951 rcu_assign_pointer(sch->stab, stab);
952 }
953 if (tca[TCA_RATE]) {
954 spinlock_t *root_lock;
955
956 err = -EOPNOTSUPP;
957 if (sch->flags & TCQ_F_MQROOT)
958 goto err_out4;
959
960 if ((sch->parent != TC_H_ROOT) &&
961 !(sch->flags & TCQ_F_INGRESS) &&
962 (!p || !(p->flags & TCQ_F_MQROOT)))
963 root_lock = qdisc_root_sleeping_lock(sch);
964 else
965 root_lock = qdisc_lock(sch);
966
967 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
968 root_lock, tca[TCA_RATE]);
969 if (err)
970 goto err_out4;
971 }
972
973 qdisc_list_add(sch);
974
975 return sch;
976 }
977err_out3:
978 dev_put(dev);
979 kfree((char *) sch - sch->padded);
980err_out2:
981 module_put(ops->owner);
982err_out:
983 *errp = err;
984 return NULL;
985
986err_out4:
987 /*
988 * Any broken qdiscs that would require an ops->reset() here?
989 * The qdisc was never in action so it shouldn't be necessary.
990 */
991 qdisc_put_stab(rtnl_dereference(sch->stab));
992 if (ops->destroy)
993 ops->destroy(sch);
994 goto err_out3;
995}
996
997static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
998{
999 struct qdisc_size_table *ostab, *stab = NULL;
1000 int err = 0;
1001
1002 if (tca[TCA_OPTIONS]) {
1003 if (sch->ops->change == NULL)
1004 return -EINVAL;
1005 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1006 if (err)
1007 return err;
1008 }
1009
1010 if (tca[TCA_STAB]) {
1011 stab = qdisc_get_stab(tca[TCA_STAB]);
1012 if (IS_ERR(stab))
1013 return PTR_ERR(stab);
1014 }
1015
1016 ostab = rtnl_dereference(sch->stab);
1017 rcu_assign_pointer(sch->stab, stab);
1018 qdisc_put_stab(ostab);
1019
1020 if (tca[TCA_RATE]) {
1021 /* NB: ignores errors from replace_estimator
1022 because change can't be undone. */
1023 if (sch->flags & TCQ_F_MQROOT)
1024 goto out;
1025 gen_replace_estimator(&sch->bstats, &sch->rate_est,
1026 qdisc_root_sleeping_lock(sch),
1027 tca[TCA_RATE]);
1028 }
1029out:
1030 return 0;
1031}
1032
1033struct check_loop_arg {
1034 struct qdisc_walker w;
1035 struct Qdisc *p;
1036 int depth;
1037};
1038
1039static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1040
1041static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1042{
1043 struct check_loop_arg arg;
1044
1045 if (q->ops->cl_ops == NULL)
1046 return 0;
1047
1048 arg.w.stop = arg.w.skip = arg.w.count = 0;
1049 arg.w.fn = check_loop_fn;
1050 arg.depth = depth;
1051 arg.p = p;
1052 q->ops->cl_ops->walk(q, &arg.w);
1053 return arg.w.stop ? -ELOOP : 0;
1054}
1055
1056static int
1057check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1058{
1059 struct Qdisc *leaf;
1060 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1061 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1062
1063 leaf = cops->leaf(q, cl);
1064 if (leaf) {
1065 if (leaf == arg->p || arg->depth > 7)
1066 return -ELOOP;
1067 return check_loop(leaf, arg->p, arg->depth + 1);
1068 }
1069 return 0;
1070}
1071
1072/*
1073 * Delete/get qdisc.
1074 */
1075
1076static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1077{
1078 struct net *net = sock_net(skb->sk);
1079 struct tcmsg *tcm = nlmsg_data(n);
1080 struct nlattr *tca[TCA_MAX + 1];
1081 struct net_device *dev;
1082 u32 clid;
1083 struct Qdisc *q = NULL;
1084 struct Qdisc *p = NULL;
1085 int err;
1086
1087 if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN))
1088 return -EPERM;
1089
1090 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1091 if (err < 0)
1092 return err;
1093
1094 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1095 if (!dev)
1096 return -ENODEV;
1097
1098 clid = tcm->tcm_parent;
1099 if (clid) {
1100 if (clid != TC_H_ROOT) {
1101 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1102 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1103 if (!p)
1104 return -ENOENT;
1105 q = qdisc_leaf(p, clid);
1106 } else if (dev_ingress_queue(dev)) {
1107 q = dev_ingress_queue(dev)->qdisc_sleeping;
1108 }
1109 } else {
1110 q = dev->qdisc;
1111 }
1112 if (!q)
1113 return -ENOENT;
1114
1115 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1116 return -EINVAL;
1117 } else {
1118 q = qdisc_lookup(dev, tcm->tcm_handle);
1119 if (!q)
1120 return -ENOENT;
1121 }
1122
1123 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1124 return -EINVAL;
1125
1126 if (n->nlmsg_type == RTM_DELQDISC) {
1127 if (!clid)
1128 return -EINVAL;
1129 if (q->handle == 0)
1130 return -ENOENT;
1131 err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1132 if (err != 0)
1133 return err;
1134 } else {
1135 qdisc_notify(net, skb, n, clid, NULL, q);
1136 }
1137 return 0;
1138}
1139
1140/*
1141 * Create/change qdisc.
1142 */
1143
1144static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1145{
1146 struct net *net = sock_net(skb->sk);
1147 struct tcmsg *tcm;
1148 struct nlattr *tca[TCA_MAX + 1];
1149 struct net_device *dev;
1150 u32 clid;
1151 struct Qdisc *q, *p;
1152 int err;
1153
1154 if (!netlink_capable(skb, CAP_NET_ADMIN))
1155 return -EPERM;
1156
1157replay:
1158 /* Reinit, just in case something touches this. */
1159 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1160 if (err < 0)
1161 return err;
1162
1163 tcm = nlmsg_data(n);
1164 clid = tcm->tcm_parent;
1165 q = p = NULL;
1166
1167 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1168 if (!dev)
1169 return -ENODEV;
1170
1171
1172 if (clid) {
1173 if (clid != TC_H_ROOT) {
1174 if (clid != TC_H_INGRESS) {
1175 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1176 if (!p)
1177 return -ENOENT;
1178 q = qdisc_leaf(p, clid);
1179 } else if (dev_ingress_queue_create(dev)) {
1180 q = dev_ingress_queue(dev)->qdisc_sleeping;
1181 }
1182 } else {
1183 q = dev->qdisc;
1184 }
1185
1186 /* It may be the default qdisc; ignore it */
1187 if (q && q->handle == 0)
1188 q = NULL;
1189
1190 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1191 if (tcm->tcm_handle) {
1192 if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1193 return -EEXIST;
1194 if (TC_H_MIN(tcm->tcm_handle))
1195 return -EINVAL;
1196 q = qdisc_lookup(dev, tcm->tcm_handle);
1197 if (!q)
1198 goto create_n_graft;
1199 if (n->nlmsg_flags & NLM_F_EXCL)
1200 return -EEXIST;
1201 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1202 return -EINVAL;
1203 if (q == p ||
1204 (p && check_loop(q, p, 0)))
1205 return -ELOOP;
1206 atomic_inc(&q->refcnt);
1207 goto graft;
1208 } else {
1209 if (!q)
1210 goto create_n_graft;
1211
1212 /* This magic test requires explanation.
1213 *
1214 * We know that some child qdisc q is already
1215 * attached to this parent and we have a choice:
1216 * either to change it or to create/graft a new one.
1217 *
1218 * 1. We are allowed to create/graft only
1219 * if both the CREATE and REPLACE flags are set.
1220 *
1221 * 2. If EXCL is set, the requestor wanted to say
1222 * that the qdisc tcm_handle is not expected
1223 * to exist, so we choose create/graft too.
1224 *
1225 * 3. The last case is when no flags are set.
1226 * Alas, it is sort of a hole in the API; we
1227 * cannot decide what to do unambiguously.
1228 * For now we select create/graft if the user
1229 * gave a KIND which does not match the existing one.
1230 */
1231 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1232 (n->nlmsg_flags & NLM_F_REPLACE) &&
1233 ((n->nlmsg_flags & NLM_F_EXCL) ||
1234 (tca[TCA_KIND] &&
1235 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1236 goto create_n_graft;
1237 }
1238 }
1239 } else {
1240 if (!tcm->tcm_handle)
1241 return -EINVAL;
1242 q = qdisc_lookup(dev, tcm->tcm_handle);
1243 }
1244
1245 /* Change qdisc parameters */
1246 if (q == NULL)
1247 return -ENOENT;
1248 if (n->nlmsg_flags & NLM_F_EXCL)
1249 return -EEXIST;
1250 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1251 return -EINVAL;
1252 err = qdisc_change(q, tca);
1253 if (err == 0)
1254 qdisc_notify(net, skb, n, clid, NULL, q);
1255 return err;
1256
1257create_n_graft:
1258 if (!(n->nlmsg_flags & NLM_F_CREATE))
1259 return -ENOENT;
1260 if (clid == TC_H_INGRESS) {
1261 if (dev_ingress_queue(dev))
1262 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1263 tcm->tcm_parent, tcm->tcm_parent,
1264 tca, &err);
1265 else
1266 err = -ENOENT;
1267 } else {
1268 struct netdev_queue *dev_queue;
1269
1270 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1271 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1272 else if (p)
1273 dev_queue = p->dev_queue;
1274 else
1275 dev_queue = netdev_get_tx_queue(dev, 0);
1276
1277 q = qdisc_create(dev, dev_queue, p,
1278 tcm->tcm_parent, tcm->tcm_handle,
1279 tca, &err);
1280 }
1281 if (q == NULL) {
1282 if (err == -EAGAIN)
1283 goto replay;
1284 return err;
1285 }
1286
1287graft:
1288 err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1289 if (err) {
1290 if (q)
1291 qdisc_destroy(q);
1292 return err;
1293 }
1294
1295 return 0;
1296}
1297
1298static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1299 u32 portid, u32 seq, u16 flags, int event)
1300{
1301 struct tcmsg *tcm;
1302 struct nlmsghdr *nlh;
1303 unsigned char *b = skb_tail_pointer(skb);
1304 struct gnet_dump d;
1305 struct qdisc_size_table *stab;
1306
1307 cond_resched();
1308 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1309 if (!nlh)
1310 goto out_nlmsg_trim;
1311 tcm = nlmsg_data(nlh);
1312 tcm->tcm_family = AF_UNSPEC;
1313 tcm->tcm__pad1 = 0;
1314 tcm->tcm__pad2 = 0;
1315 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1316 tcm->tcm_parent = clid;
1317 tcm->tcm_handle = q->handle;
1318 tcm->tcm_info = atomic_read(&q->refcnt);
1319 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1320 goto nla_put_failure;
1321 if (q->ops->dump && q->ops->dump(q, skb) < 0)
1322 goto nla_put_failure;
1323 q->qstats.qlen = q->q.qlen;
1324
1325 stab = rtnl_dereference(q->stab);
1326 if (stab && qdisc_dump_stab(skb, stab) < 0)
1327 goto nla_put_failure;
1328
1329 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1330 qdisc_root_sleeping_lock(q), &d) < 0)
1331 goto nla_put_failure;
1332
1333 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1334 goto nla_put_failure;
1335
1336 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
1337 gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1338 gnet_stats_copy_queue(&d, &q->qstats) < 0)
1339 goto nla_put_failure;
1340
1341 if (gnet_stats_finish_copy(&d) < 0)
1342 goto nla_put_failure;
1343
1344 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1345 return skb->len;
1346
1347out_nlmsg_trim:
1348nla_put_failure:
1349 nlmsg_trim(skb, b);
1350 return -1;
1351}
1352
1353static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1354{
1355 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1356}
1357
1358static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1359 struct nlmsghdr *n, u32 clid,
1360 struct Qdisc *old, struct Qdisc *new)
1361{
1362 struct sk_buff *skb;
1363 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1364
1365 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1366 if (!skb)
1367 return -ENOBUFS;
1368
1369 if (old && !tc_qdisc_dump_ignore(old)) {
1370 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1371 0, RTM_DELQDISC) < 0)
1372 goto err_out;
1373 }
1374 if (new && !tc_qdisc_dump_ignore(new)) {
1375 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1376 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1377 goto err_out;
1378 }
1379
1380 if (skb->len)
1381 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1382 n->nlmsg_flags & NLM_F_ECHO);
1383
1384err_out:
1385 kfree_skb(skb);
1386 return -EINVAL;
1387}
1388
1389static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1390 struct netlink_callback *cb,
1391 int *q_idx_p, int s_q_idx)
1392{
1393 int ret = 0, q_idx = *q_idx_p;
1394 struct Qdisc *q;
1395
1396 if (!root)
1397 return 0;
1398
1399 q = root;
1400 if (q_idx < s_q_idx) {
1401 q_idx++;
1402 } else {
1403 if (!tc_qdisc_dump_ignore(q) &&
1404 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1405 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1406 goto done;
1407 q_idx++;
1408 }
1409 list_for_each_entry(q, &root->list, list) {
1410 if (q_idx < s_q_idx) {
1411 q_idx++;
1412 continue;
1413 }
1414 if (!tc_qdisc_dump_ignore(q) &&
1415 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1416 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1417 goto done;
1418 q_idx++;
1419 }
1420
1421out:
1422 *q_idx_p = q_idx;
1423 return ret;
1424done:
1425 ret = -1;
1426 goto out;
1427}
1428
1429static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1430{
1431 struct net *net = sock_net(skb->sk);
1432 int idx, q_idx;
1433 int s_idx, s_q_idx;
1434 struct net_device *dev;
1435
1436 s_idx = cb->args[0];
1437 s_q_idx = q_idx = cb->args[1];
1438
1439 idx = 0;
1440 ASSERT_RTNL();
1441 for_each_netdev(net, dev) {
1442 struct netdev_queue *dev_queue;
1443
1444 if (idx < s_idx)
1445 goto cont;
1446 if (idx > s_idx)
1447 s_q_idx = 0;
1448 q_idx = 0;
1449
1450 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1451 goto done;
1452
1453 dev_queue = dev_ingress_queue(dev);
1454 if (dev_queue &&
1455 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1456 &q_idx, s_q_idx) < 0)
1457 goto done;
1458
1459cont:
1460 idx++;
1461 }
1462
1463done:
1464 cb->args[0] = idx;
1465 cb->args[1] = q_idx;
1466
1467 return skb->len;
1468}
1469
1470
1471
1472/************************************************
1473 * Traffic classes manipulation. *
1474 ************************************************/
1475
1476
1477
1478static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1479{
1480 struct net *net = sock_net(skb->sk);
1481 struct tcmsg *tcm = nlmsg_data(n);
1482 struct nlattr *tca[TCA_MAX + 1];
1483 struct net_device *dev;
1484 struct Qdisc *q = NULL;
1485 const struct Qdisc_class_ops *cops;
1486 unsigned long cl = 0;
1487 unsigned long new_cl;
1488 u32 portid;
1489 u32 clid;
1490 u32 qid;
1491 int err;
1492
1493 if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN))
1494 return -EPERM;
1495
1496 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1497 if (err < 0)
1498 return err;
1499
1500 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1501 if (!dev)
1502 return -ENODEV;
1503
1504 /*
1505 parent == TC_H_UNSPEC - unspecified parent.
1506 parent == TC_H_ROOT - class is root, which has no parent.
1507 parent == X:0 - parent is root class.
1508 parent == X:Y - parent is a node in hierarchy.
1509 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1510
1511 handle == 0:0 - generate handle from kernel pool.
1512 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1513 handle == X:Y - fully specified.
1514 handle == X:0 - root class.
1515 */
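 /* For illustration (hypothetical command): "tc class add dev eth0
 * parent 1: classid 1:10 ..." arrives with tcm_parent == 1:0 and
 * tcm_handle == 1:10; both carry major 1, so the qdisc handle X:0
 * resolved below is 1:0.
 */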
1516
1517 /* Step 1. Determine qdisc handle X:0 */
1518
1519 portid = tcm->tcm_parent;
1520 clid = tcm->tcm_handle;
1521 qid = TC_H_MAJ(clid);
1522
1523 if (portid != TC_H_ROOT) {
1524 u32 qid1 = TC_H_MAJ(portid);
1525
1526 if (qid && qid1) {
1527 /* If both majors are known, they must be identical. */
1528 if (qid != qid1)
1529 return -EINVAL;
1530 } else if (qid1) {
1531 qid = qid1;
1532 } else if (qid == 0)
1533 qid = dev->qdisc->handle;
1534
1535 /* Now qid is a genuine qdisc handle consistent
1536 * with both parent and child.
1537 *
1538 * TC_H_MAJ(portid) may still be unspecified; complete it now.
1539 */
1540 if (portid)
1541 portid = TC_H_MAKE(qid, portid);
1542 } else {
1543 if (qid == 0)
1544 qid = dev->qdisc->handle;
1545 }
1546
1547 /* OK. Locate qdisc */
1548 q = qdisc_lookup(dev, qid);
1549 if (!q)
1550 return -ENOENT;
1551
1552 /* And check that it supports classes */
1553 cops = q->ops->cl_ops;
1554 if (cops == NULL)
1555 return -EINVAL;
1556
1557 /* Now try to get class */
1558 if (clid == 0) {
1559 if (portid == TC_H_ROOT)
1560 clid = qid;
1561 } else
1562 clid = TC_H_MAKE(qid, clid);
1563
1564 if (clid)
1565 cl = cops->get(q, clid);
1566
1567 if (cl == 0) {
1568 err = -ENOENT;
1569 if (n->nlmsg_type != RTM_NEWTCLASS ||
1570 !(n->nlmsg_flags & NLM_F_CREATE))
1571 goto out;
1572 } else {
1573 switch (n->nlmsg_type) {
1574 case RTM_NEWTCLASS:
1575 err = -EEXIST;
1576 if (n->nlmsg_flags & NLM_F_EXCL)
1577 goto out;
1578 break;
1579 case RTM_DELTCLASS:
1580 err = -EOPNOTSUPP;
1581 if (cops->delete)
1582 err = cops->delete(q, cl);
1583 if (err == 0)
1584 tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1585 goto out;
1586 case RTM_GETTCLASS:
1587 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1588 goto out;
1589 default:
1590 err = -EINVAL;
1591 goto out;
1592 }
1593 }
1594
1595 new_cl = cl;
1596 err = -EOPNOTSUPP;
1597 if (cops->change)
1598 err = cops->change(q, clid, portid, tca, &new_cl);
1599 if (err == 0)
1600 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1601
1602out:
1603 if (cl)
1604 cops->put(q, cl);
1605
1606 return err;
1607}
1608
1609
1610static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1611 unsigned long cl,
1612 u32 portid, u32 seq, u16 flags, int event)
1613{
1614 struct tcmsg *tcm;
1615 struct nlmsghdr *nlh;
1616 unsigned char *b = skb_tail_pointer(skb);
1617 struct gnet_dump d;
1618 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1619
1620 cond_resched();
1621 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1622 if (!nlh)
1623 goto out_nlmsg_trim;
1624 tcm = nlmsg_data(nlh);
1625 tcm->tcm_family = AF_UNSPEC;
1626 tcm->tcm__pad1 = 0;
1627 tcm->tcm__pad2 = 0;
1628 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1629 tcm->tcm_parent = q->handle;
1630 tcm->tcm_handle = q->handle;
1631 tcm->tcm_info = 0;
1632 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1633 goto nla_put_failure;
1634 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1635 goto nla_put_failure;
1636
1637 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1638 qdisc_root_sleeping_lock(q), &d) < 0)
1639 goto nla_put_failure;
1640
1641 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1642 goto nla_put_failure;
1643
1644 if (gnet_stats_finish_copy(&d) < 0)
1645 goto nla_put_failure;
1646
1647 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1648 return skb->len;
1649
1650out_nlmsg_trim:
1651nla_put_failure:
1652 nlmsg_trim(skb, b);
1653 return -1;
1654}
1655
1656static int tclass_notify(struct net *net, struct sk_buff *oskb,
1657 struct nlmsghdr *n, struct Qdisc *q,
1658 unsigned long cl, int event)
1659{
1660 struct sk_buff *skb;
1661 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1662
1663 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1664 if (!skb)
1665 return -ENOBUFS;
1666
1667 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1668 kfree_skb(skb);
1669 return -EINVAL;
1670 }
1671
1672 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1673 n->nlmsg_flags & NLM_F_ECHO);
1674}
1675
1676struct qdisc_dump_args {
1677 struct qdisc_walker w;
1678 struct sk_buff *skb;
1679 struct netlink_callback *cb;
1680};
1681
1682static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1683{
1684 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1685
1686 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1687 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1688}
1689
1690static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1691 struct tcmsg *tcm, struct netlink_callback *cb,
1692 int *t_p, int s_t)
1693{
1694 struct qdisc_dump_args arg;
1695
1696 if (tc_qdisc_dump_ignore(q) ||
1697 *t_p < s_t || !q->ops->cl_ops ||
1698 (tcm->tcm_parent &&
1699 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1700 (*t_p)++;
1701 return 0;
1702 }
1703 if (*t_p > s_t)
1704 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1705 arg.w.fn = qdisc_class_dump;
1706 arg.skb = skb;
1707 arg.cb = cb;
1708 arg.w.stop = 0;
1709 arg.w.skip = cb->args[1];
1710 arg.w.count = 0;
1711 q->ops->cl_ops->walk(q, &arg.w);
1712 cb->args[1] = arg.w.count;
1713 if (arg.w.stop)
1714 return -1;
1715 (*t_p)++;
1716 return 0;
1717}
1718
1719static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1720 struct tcmsg *tcm, struct netlink_callback *cb,
1721 int *t_p, int s_t)
1722{
1723 struct Qdisc *q;
1724
1725 if (!root)
1726 return 0;
1727
1728 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1729 return -1;
1730
1731 list_for_each_entry(q, &root->list, list) {
1732 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1733 return -1;
1734 }
1735
1736 return 0;
1737}
1738
1739static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1740{
1741 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1742 struct net *net = sock_net(skb->sk);
1743 struct netdev_queue *dev_queue;
1744 struct net_device *dev;
1745 int t, s_t;
1746
1747 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1748 return 0;
1749 dev = dev_get_by_index(net, tcm->tcm_ifindex);
1750 if (!dev)
1751 return 0;
1752
1753 s_t = cb->args[0];
1754 t = 0;
1755
1756 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1757 goto done;
1758
1759 dev_queue = dev_ingress_queue(dev);
1760 if (dev_queue &&
1761 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1762 &t, s_t) < 0)
1763 goto done;
1764
1765done:
1766 cb->args[0] = t;
1767
1768 dev_put(dev);
1769 return skb->len;
1770}
1771
1772/* Main classifier routine: scans the classifier chain attached
1773 * to this qdisc, (optionally) tests for the protocol and asks the
1774 * specific classifiers.
1775 */
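/*
 * A minimal sketch (hypothetical qdisc code, not taken from any real
 * scheduler) of how a classful qdisc's ->enqueue() typically uses this:
 *
 *	struct tcf_result res;
 *	int result = tc_classify(skb, q->filter_list, &res);
 *
 *	if (result >= 0)
 *		cl = find_class_by_id(q, res.classid);	// hypothetical helper
 *	else
 *		cl = q->default_class;			// hypothetical fallback
 */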
1776int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
1777 struct tcf_result *res)
1778{
1779 __be16 protocol = skb->protocol;
1780 int err;
1781
1782 for (; tp; tp = tp->next) {
1783 if (tp->protocol != protocol &&
1784 tp->protocol != htons(ETH_P_ALL))
1785 continue;
1786 err = tp->classify(skb, tp, res);
1787
1788 if (err >= 0) {
1789#ifdef CONFIG_NET_CLS_ACT
1790 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1791 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1792#endif
1793 return err;
1794 }
1795 }
1796 return -1;
1797}
1798EXPORT_SYMBOL(tc_classify_compat);
1799
1800int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1801 struct tcf_result *res)
1802{
1803 int err = 0;
1804#ifdef CONFIG_NET_CLS_ACT
1805 const struct tcf_proto *otp = tp;
1806reclassify:
1807#endif
1808
1809 err = tc_classify_compat(skb, tp, res);
1810#ifdef CONFIG_NET_CLS_ACT
1811 if (err == TC_ACT_RECLASSIFY) {
1812 u32 verd = G_TC_VERD(skb->tc_verd);
1813 tp = otp;
1814
1815 if (verd++ >= MAX_REC_LOOP) {
1816 net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
1817 tp->q->ops->id,
1818 tp->prio & 0xffff,
1819 ntohs(tp->protocol));
1820 return TC_ACT_SHOT;
1821 }
1822 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1823 goto reclassify;
1824 }
1825#endif
1826 return err;
1827}
1828EXPORT_SYMBOL(tc_classify);
1829
1830void tcf_destroy(struct tcf_proto *tp)
1831{
1832 tp->ops->destroy(tp);
1833 module_put(tp->ops->owner);
1834 kfree(tp);
1835}
1836
1837void tcf_destroy_chain(struct tcf_proto **fl)
1838{
1839 struct tcf_proto *tp;
1840
1841 while ((tp = *fl) != NULL) {
1842 *fl = tp->next;
1843 tcf_destroy(tp);
1844 }
1845}
1846EXPORT_SYMBOL(tcf_destroy_chain);
1847
1848#ifdef CONFIG_PROC_FS
1849static int psched_show(struct seq_file *seq, void *v)
1850{
1851 struct timespec ts;
1852
1853 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1854 seq_printf(seq, "%08x %08x %08x %08x\n",
1855 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1856 1000000,
1857 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1858
1859 return 0;
1860}
1861
1862static int psched_open(struct inode *inode, struct file *file)
1863{
1864 return single_open(file, psched_show, NULL);
1865}
1866
1867static const struct file_operations psched_fops = {
1868 .owner = THIS_MODULE,
1869 .open = psched_open,
1870 .read = seq_read,
1871 .llseek = seq_lseek,
1872 .release = single_release,
1873};
1874
1875static int __net_init psched_net_init(struct net *net)
1876{
1877 struct proc_dir_entry *e;
1878
1879 e = proc_create("psched", 0, net->proc_net, &psched_fops);
1880 if (e == NULL)
1881 return -ENOMEM;
1882
1883 return 0;
1884}
1885
1886static void __net_exit psched_net_exit(struct net *net)
1887{
1888 remove_proc_entry("psched", net->proc_net);
1889}
1890#else
1891static int __net_init psched_net_init(struct net *net)
1892{
1893 return 0;
1894}
1895
1896static void __net_exit psched_net_exit(struct net *net)
1897{
1898}
1899#endif
1900
1901static struct pernet_operations psched_net_ops = {
1902 .init = psched_net_init,
1903 .exit = psched_net_exit,
1904};
1905
1906static int __init pktsched_init(void)
1907{
1908 int err;
1909
1910 err = register_pernet_subsys(&psched_net_ops);
1911 if (err) {
1912 pr_err("pktsched_init: "
1913 "cannot initialize per netns operations\n");
1914 return err;
1915 }
1916
1917 register_qdisc(&pfifo_fast_ops);
1918 register_qdisc(&pfifo_qdisc_ops);
1919 register_qdisc(&bfifo_qdisc_ops);
1920 register_qdisc(&pfifo_head_drop_qdisc_ops);
1921 register_qdisc(&mq_qdisc_ops);
1922
1923 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1924 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1925 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1926 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1927 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1928 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1929
1930 return 0;
1931}
1932
1933subsys_initcall(pktsched_init);