net/sched/sch_api.c (Linux kernel v4.17)
   1/*
   2 * net/sched/sch_api.c	Packet scheduler API.
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10 *
  11 * Fixes:
  12 *
  13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
  14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
  15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/string.h>
  22#include <linux/errno.h>
  23#include <linux/skbuff.h>
  24#include <linux/init.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/kmod.h>
  28#include <linux/list.h>
  29#include <linux/hrtimer.h>
  30#include <linux/lockdep.h>
  31#include <linux/slab.h>
  32#include <linux/hashtable.h>
  33
  34#include <net/net_namespace.h>
  35#include <net/sock.h>
  36#include <net/netlink.h>
  37#include <net/pkt_sched.h>
  38#include <net/pkt_cls.h>
  39
  40/*
  41
  42   Short review.
  43   -------------
  44
  45   This file consists of two interrelated parts:
  46
  47   1. queueing disciplines manager frontend.
  48   2. traffic classes manager frontend.
  49
   50   Generally, a queueing discipline ("qdisc") is a black box
   51   which is able to enqueue packets and to dequeue them (when
   52   the device is ready to send something) in an order and at times
   53   determined by the algorithm hidden inside it.
   54
   55   qdiscs are divided into two categories:
   56   - "queues", which have no internal structure visible from the outside.
   57   - "schedulers", which split all the packets into "traffic classes",
   58     using "packet classifiers" (see cls_api.c).
   59
   60   In turn, classes may have child qdiscs (as a rule, queues)
   61   attached to them, and so on.
   62
   63   The goal of the routines in this file is to translate the
   64   information supplied by the user in the form of handles
   65   into a form more intelligible to the kernel, to perform sanity
   66   checks and the part of the work that is common to all qdiscs,
   67   and to provide rtnetlink notifications.
  68
  69   All real intelligent work is done inside qdisc modules.
  70
  71
  72
  73   Every discipline has two major routines: enqueue and dequeue.
  74
  75   ---dequeue
  76
   77   dequeue usually returns an skb to send. It is allowed to return NULL,
   78   but that does not mean that the queue is empty; it just means that the
   79   discipline does not want to send anything this time.
   80   The queue is really empty only if q->q.qlen == 0.
   81   For complicated disciplines with multiple queues, q->q is not the
   82   real packet queue, but q->q.qlen must nevertheless be valid.
   83
   84   ---enqueue
   85
   86   enqueue returns 0 if the packet was enqueued successfully.
   87   If a packet (this one or another one) was dropped, it returns a
   88   non-zero error code.
   89   NET_XMIT_DROP 	- this packet was dropped.
   90     Expected action: do not back off, but wait until the queue clears.
   91   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
   92     Expected action: back off or ignore.
  93
  94   Auxiliary routines:
  95
  96   ---peek
  97
   98   like dequeue, but without removing a packet from the queue.
   99
  100   ---reset
  101
  102   returns the qdisc to its initial state: purges all buffers, clears all
  103   timers and counters (except statistics), etc.
  104
  105   ---init
  106
  107   initializes a newly created qdisc.
  108
  109   ---destroy
  110
  111   destroys resources allocated by init and during the lifetime of the qdisc.
  112
  113   ---change
  114
  115   changes qdisc parameters.
 116 */
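/*
 * Illustrative sketch (not part of the original file): a minimal FIFO-style
 * qdisc wired up through the API managed below, modeled loosely on
 * sch_fifo.c.  The "example_*" names are hypothetical; register_qdisc()
 * and unregister_qdisc() further down are the real entry points.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	/* Accept packets until the device's tx_queue_len is reached,
	 * then drop, returning a NET_XMIT_* error as described above.
	 */
	if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);
	return qdisc_drop(skb, sch, to_free);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= qdisc_dequeue_head,	/* plain head-of-queue dequeue */
	.peek		= qdisc_peek_head,	/* dequeue without removal */
	.owner		= THIS_MODULE,
};

/* A module would call register_qdisc(&example_qdisc_ops) from its init
 * function and unregister_qdisc(&example_qdisc_ops) on exit.
 */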
 117
  118/* Protects the list of registered TC modules. It is a pure SMP lock. */
 119static DEFINE_RWLOCK(qdisc_mod_lock);
 120
 121
 122/************************************************
 123 *	Queueing disciplines manipulation.	*
 124 ************************************************/
 125
 126
 127/* The list of all installed queueing disciplines. */
 128
 129static struct Qdisc_ops *qdisc_base;
 130
 131/* Register/unregister queueing discipline */
 132
 133int register_qdisc(struct Qdisc_ops *qops)
 134{
 135	struct Qdisc_ops *q, **qp;
 136	int rc = -EEXIST;
 137
 138	write_lock(&qdisc_mod_lock);
 139	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 140		if (!strcmp(qops->id, q->id))
 141			goto out;
 142
 143	if (qops->enqueue == NULL)
 144		qops->enqueue = noop_qdisc_ops.enqueue;
 145	if (qops->peek == NULL) {
 146		if (qops->dequeue == NULL)
 147			qops->peek = noop_qdisc_ops.peek;
 148		else
 149			goto out_einval;
 150	}
 151	if (qops->dequeue == NULL)
 152		qops->dequeue = noop_qdisc_ops.dequeue;
 153
 154	if (qops->cl_ops) {
 155		const struct Qdisc_class_ops *cops = qops->cl_ops;
 156
 157		if (!(cops->find && cops->walk && cops->leaf))
 158			goto out_einval;
 159
 160		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
 161			goto out_einval;
 162	}
 163
 164	qops->next = NULL;
 165	*qp = qops;
 166	rc = 0;
 167out:
 168	write_unlock(&qdisc_mod_lock);
 169	return rc;
 170
 171out_einval:
 172	rc = -EINVAL;
 173	goto out;
 174}
 175EXPORT_SYMBOL(register_qdisc);
 176
 177int unregister_qdisc(struct Qdisc_ops *qops)
 178{
 179	struct Qdisc_ops *q, **qp;
 180	int err = -ENOENT;
 181
 182	write_lock(&qdisc_mod_lock);
 183	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 184		if (q == qops)
 185			break;
 186	if (q) {
 187		*qp = q->next;
 188		q->next = NULL;
 189		err = 0;
 190	}
 191	write_unlock(&qdisc_mod_lock);
 192	return err;
 193}
 194EXPORT_SYMBOL(unregister_qdisc);
 195
 196/* Get default qdisc if not otherwise specified */
 197void qdisc_get_default(char *name, size_t len)
 198{
 199	read_lock(&qdisc_mod_lock);
 200	strlcpy(name, default_qdisc_ops->id, len);
 201	read_unlock(&qdisc_mod_lock);
 202}
 203
 204static struct Qdisc_ops *qdisc_lookup_default(const char *name)
 205{
 206	struct Qdisc_ops *q = NULL;
 207
 208	for (q = qdisc_base; q; q = q->next) {
 209		if (!strcmp(name, q->id)) {
 210			if (!try_module_get(q->owner))
 211				q = NULL;
 212			break;
 213		}
 214	}
 215
 216	return q;
 217}
 218
 219/* Set new default qdisc to use */
 220int qdisc_set_default(const char *name)
 221{
 222	const struct Qdisc_ops *ops;
 223
 224	if (!capable(CAP_NET_ADMIN))
 225		return -EPERM;
 226
 227	write_lock(&qdisc_mod_lock);
 228	ops = qdisc_lookup_default(name);
 229	if (!ops) {
 230		/* Not found, drop lock and try to load module */
 231		write_unlock(&qdisc_mod_lock);
 232		request_module("sch_%s", name);
 233		write_lock(&qdisc_mod_lock);
 234
 235		ops = qdisc_lookup_default(name);
 236	}
 237
 238	if (ops) {
 239		/* Set new default */
 240		module_put(default_qdisc_ops->owner);
 241		default_qdisc_ops = ops;
 242	}
 243	write_unlock(&qdisc_mod_lock);
 244
 245	return ops ? 0 : -ENOENT;
 246}
 247
 248#ifdef CONFIG_NET_SCH_DEFAULT
 249/* Set default value from kernel config */
 250static int __init sch_default_qdisc(void)
 251{
 252	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
 253}
 254late_initcall(sch_default_qdisc);
 255#endif
 256
  257/* We know the handle. Find the qdisc among all qdiscs attached to the device
  258 * (root qdisc, all its children, children of children, etc.).
  259 * Note: the caller holds either the RTNL lock or rcu_read_lock().
 260 */
 261
 262static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 263{
 264	struct Qdisc *q;
 265
 266	if (!qdisc_dev(root))
 267		return (root->handle == handle ? root : NULL);
 268
 269	if (!(root->flags & TCQ_F_BUILTIN) &&
 270	    root->handle == handle)
 271		return root;
 272
 273	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
 274		if (q->handle == handle)
 275			return q;
 276	}
 277	return NULL;
 278}
 279
 280void qdisc_hash_add(struct Qdisc *q, bool invisible)
 281{
 282	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
 283		ASSERT_RTNL();
 284		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
 285		if (invisible)
 286			q->flags |= TCQ_F_INVISIBLE;
 287	}
 288}
 289EXPORT_SYMBOL(qdisc_hash_add);
 290
 291void qdisc_hash_del(struct Qdisc *q)
 292{
 293	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
 294		ASSERT_RTNL();
 295		hash_del_rcu(&q->hash);
 296	}
 297}
 298EXPORT_SYMBOL(qdisc_hash_del);
 299
 300struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 301{
 302	struct Qdisc *q;
 303
 304	if (!handle)
 305		return NULL;
 306	q = qdisc_match_from_root(dev->qdisc, handle);
 307	if (q)
 308		goto out;
 309
 310	if (dev_ingress_queue(dev))
 311		q = qdisc_match_from_root(
 312			dev_ingress_queue(dev)->qdisc_sleeping,
 313			handle);
 314out:
 315	return q;
 316}
 317
 318static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 319{
 320	unsigned long cl;
 321	struct Qdisc *leaf;
 322	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
 323
 324	if (cops == NULL)
 325		return NULL;
 326	cl = cops->find(p, classid);
 327
 328	if (cl == 0)
 329		return NULL;
 330	leaf = cops->leaf(p, cl);
 331	return leaf;
 332}
 333
 334/* Find queueing discipline by name */
 335
 336static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
 337{
 338	struct Qdisc_ops *q = NULL;
 339
 340	if (kind) {
 341		read_lock(&qdisc_mod_lock);
 342		for (q = qdisc_base; q; q = q->next) {
 343			if (nla_strcmp(kind, q->id) == 0) {
 344				if (!try_module_get(q->owner))
 345					q = NULL;
 346				break;
 347			}
 348		}
 349		read_unlock(&qdisc_mod_lock);
 350	}
 351	return q;
 352}
 353
  354/* The linklayer setting was not transferred from iproute2 in older
  355 * versions, and the rate table lookup system has been dropped from
  356 * the kernel. To stay backward compatible with older iproute2 tc
  357 * utilities, we detect the linklayer setting by detecting whether the
  358 * rate table was modified.
  359 *
  360 * For linklayer ATM table entries, the rate table will be aligned to
  361 * 48 bytes, thus some table entries will contain the same value.  The
  362 * mpu (min packet unit) is also encoded into the old rate table, so
  363 * starting from the mpu, we find the low and high table entries that
  364 * map this cell.  If these entries contain the same value, then the
  365 * rate table has been modified for linklayer ATM.
  366 *
  367 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
  368 * then rounding up to the next cell, calculating the table entry one
  369 * below, and comparing the two.
  370 */
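/*
 * Worked example (illustrative only): with r->mpu = 0 and r->cell_log = 3,
 * low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, so
 * cell_low = 0 >> 3 = 0 and cell_high = (48 >> 3) - 1 = 5.  On a table that
 * was aligned for ATM, every size from 1 to 48 bytes costs one 48-byte
 * cell, hence rtab[0] == rtab[5] and TC_LINKLAYER_ATM is returned; on a
 * plain Ethernet table those two entries normally differ.
 */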
 371static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 372{
 373	int low       = roundup(r->mpu, 48);
 374	int high      = roundup(low+1, 48);
 375	int cell_low  = low >> r->cell_log;
 376	int cell_high = (high >> r->cell_log) - 1;
 377
 378	/* rtab is too inaccurate at rates > 100Mbit/s */
 379	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
 380		pr_debug("TC linklayer: Giving up ATM detection\n");
 381		return TC_LINKLAYER_ETHERNET;
 382	}
 383
 384	if ((cell_high > cell_low) && (cell_high < 256)
 385	    && (rtab[cell_low] == rtab[cell_high])) {
 386		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
 387			 cell_low, cell_high, rtab[cell_high]);
 388		return TC_LINKLAYER_ATM;
 389	}
 390	return TC_LINKLAYER_ETHERNET;
 391}
 392
 393static struct qdisc_rate_table *qdisc_rtab_list;
 394
 395struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 396					struct nlattr *tab,
 397					struct netlink_ext_ack *extack)
 398{
 399	struct qdisc_rate_table *rtab;
 400
 401	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
 402	    nla_len(tab) != TC_RTAB_SIZE) {
 403		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
 404		return NULL;
 405	}
 406
 407	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 408		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
 409		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
 410			rtab->refcnt++;
 411			return rtab;
 412		}
 413	}
 414
 415	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
 416	if (rtab) {
 417		rtab->rate = *r;
 418		rtab->refcnt = 1;
 419		memcpy(rtab->data, nla_data(tab), 1024);
 420		if (r->linklayer == TC_LINKLAYER_UNAWARE)
 421			r->linklayer = __detect_linklayer(r, rtab->data);
 422		rtab->next = qdisc_rtab_list;
 423		qdisc_rtab_list = rtab;
 424	} else {
 425		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
 426	}
 427	return rtab;
 428}
 429EXPORT_SYMBOL(qdisc_get_rtab);
 430
 431void qdisc_put_rtab(struct qdisc_rate_table *tab)
 432{
 433	struct qdisc_rate_table *rtab, **rtabp;
 434
 435	if (!tab || --tab->refcnt)
 436		return;
 437
 438	for (rtabp = &qdisc_rtab_list;
 439	     (rtab = *rtabp) != NULL;
 440	     rtabp = &rtab->next) {
 441		if (rtab == tab) {
 442			*rtabp = rtab->next;
 443			kfree(rtab);
 444			return;
 445		}
 446	}
 447}
 448EXPORT_SYMBOL(qdisc_put_rtab);
 449
 450static LIST_HEAD(qdisc_stab_list);
 451
 452static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 453	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
 454	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 455};
 456
 457static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
 458					       struct netlink_ext_ack *extack)
 459{
 460	struct nlattr *tb[TCA_STAB_MAX + 1];
 461	struct qdisc_size_table *stab;
 462	struct tc_sizespec *s;
 463	unsigned int tsize = 0;
 464	u16 *tab = NULL;
 465	int err;
 466
 467	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
 468	if (err < 0)
 469		return ERR_PTR(err);
 470	if (!tb[TCA_STAB_BASE]) {
 471		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
 472		return ERR_PTR(-EINVAL);
 473	}
 474
 475	s = nla_data(tb[TCA_STAB_BASE]);
 476
 477	if (s->tsize > 0) {
 478		if (!tb[TCA_STAB_DATA]) {
 479			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
 480			return ERR_PTR(-EINVAL);
 481		}
 482		tab = nla_data(tb[TCA_STAB_DATA]);
 483		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 484	}
 485
 486	if (tsize != s->tsize || (!tab && tsize > 0)) {
 487		NL_SET_ERR_MSG(extack, "Invalid size of size table");
 488		return ERR_PTR(-EINVAL);
 489	}
 490
 491	list_for_each_entry(stab, &qdisc_stab_list, list) {
 492		if (memcmp(&stab->szopts, s, sizeof(*s)))
 493			continue;
 494		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 495			continue;
 496		stab->refcnt++;
 497		return stab;
 498	}
 499
 500	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 501	if (!stab)
 502		return ERR_PTR(-ENOMEM);
 503
 504	stab->refcnt = 1;
 505	stab->szopts = *s;
 506	if (tsize > 0)
 507		memcpy(stab->data, tab, tsize * sizeof(u16));
 508
 509	list_add_tail(&stab->list, &qdisc_stab_list);
 510
 511	return stab;
 512}
 513
 514static void stab_kfree_rcu(struct rcu_head *head)
 515{
 516	kfree(container_of(head, struct qdisc_size_table, rcu));
 517}
 518
 519void qdisc_put_stab(struct qdisc_size_table *tab)
 520{
 521	if (!tab)
 522		return;
 523
 524	if (--tab->refcnt == 0) {
 525		list_del(&tab->list);
 526		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
 527	}
 528}
 529EXPORT_SYMBOL(qdisc_put_stab);
 530
 531static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
 532{
 533	struct nlattr *nest;
 534
 535	nest = nla_nest_start(skb, TCA_STAB);
 536	if (nest == NULL)
 537		goto nla_put_failure;
 538	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
 539		goto nla_put_failure;
 540	nla_nest_end(skb, nest);
 541
 542	return skb->len;
 543
 544nla_put_failure:
 545	return -1;
 546}
 547
 548void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 549			       const struct qdisc_size_table *stab)
 550{
 551	int pkt_len, slot;
 552
 553	pkt_len = skb->len + stab->szopts.overhead;
 554	if (unlikely(!stab->szopts.tsize))
 555		goto out;
 556
 557	slot = pkt_len + stab->szopts.cell_align;
 558	if (unlikely(slot < 0))
 559		slot = 0;
 560
 561	slot >>= stab->szopts.cell_log;
 562	if (likely(slot < stab->szopts.tsize))
 563		pkt_len = stab->data[slot];
 564	else
 565		pkt_len = stab->data[stab->szopts.tsize - 1] *
 566				(slot / stab->szopts.tsize) +
 567				stab->data[slot % stab->szopts.tsize];
 568
 569	pkt_len <<= stab->szopts.size_log;
 570out:
 571	if (unlikely(pkt_len < 1))
 572		pkt_len = 1;
 573	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 574}
 575EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
 576
 577void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
 578{
 579	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
 580		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
 581			txt, qdisc->ops->id, qdisc->handle >> 16);
 582		qdisc->flags |= TCQ_F_WARN_NONWC;
 583	}
 584}
 585EXPORT_SYMBOL(qdisc_warn_nonwc);
 586
 587static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 588{
 589	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 590						 timer);
 591
 592	rcu_read_lock();
 593	__netif_schedule(qdisc_root(wd->qdisc));
 594	rcu_read_unlock();
 595
 596	return HRTIMER_NORESTART;
 597}
 598
 599void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 600{
 601	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 602	wd->timer.function = qdisc_watchdog;
 603	wd->qdisc = qdisc;
 604}
 605EXPORT_SYMBOL(qdisc_watchdog_init);
 606
 607void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
 608{
 609	if (test_bit(__QDISC_STATE_DEACTIVATED,
 610		     &qdisc_root_sleeping(wd->qdisc)->state))
 611		return;
 612
 613	if (wd->last_expires == expires)
 614		return;
 615
 616	wd->last_expires = expires;
 617	hrtimer_start(&wd->timer,
 618		      ns_to_ktime(expires),
 619		      HRTIMER_MODE_ABS_PINNED);
 620}
 621EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
 622
 623void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 624{
 625	hrtimer_cancel(&wd->timer);
 626}
 627EXPORT_SYMBOL(qdisc_watchdog_cancel);
 628
 629static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 630{
 631	struct hlist_head *h;
 632	unsigned int i;
 633
 634	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
 635
 636	if (h != NULL) {
 637		for (i = 0; i < n; i++)
 638			INIT_HLIST_HEAD(&h[i]);
 639	}
 640	return h;
 641}
 642
 643void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 644{
 645	struct Qdisc_class_common *cl;
 646	struct hlist_node *next;
 647	struct hlist_head *nhash, *ohash;
 648	unsigned int nsize, nmask, osize;
 649	unsigned int i, h;
 650
 651	/* Rehash when load factor exceeds 0.75 */
 652	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
 653		return;
 654	nsize = clhash->hashsize * 2;
 655	nmask = nsize - 1;
 656	nhash = qdisc_class_hash_alloc(nsize);
 657	if (nhash == NULL)
 658		return;
 659
 660	ohash = clhash->hash;
 661	osize = clhash->hashsize;
 662
 663	sch_tree_lock(sch);
 664	for (i = 0; i < osize; i++) {
 665		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
 666			h = qdisc_class_hash(cl->classid, nmask);
 667			hlist_add_head(&cl->hnode, &nhash[h]);
 668		}
 669	}
 670	clhash->hash     = nhash;
 671	clhash->hashsize = nsize;
 672	clhash->hashmask = nmask;
 673	sch_tree_unlock(sch);
 674
 675	kvfree(ohash);
 676}
 677EXPORT_SYMBOL(qdisc_class_hash_grow);
 678
 679int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 680{
 681	unsigned int size = 4;
 682
 683	clhash->hash = qdisc_class_hash_alloc(size);
 684	if (!clhash->hash)
 685		return -ENOMEM;
 686	clhash->hashsize  = size;
 687	clhash->hashmask  = size - 1;
 688	clhash->hashelems = 0;
 689	return 0;
 690}
 691EXPORT_SYMBOL(qdisc_class_hash_init);
 692
 693void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
 694{
 695	kvfree(clhash->hash);
 696}
 697EXPORT_SYMBOL(qdisc_class_hash_destroy);
 698
 699void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
 700			     struct Qdisc_class_common *cl)
 701{
 702	unsigned int h;
 703
 704	INIT_HLIST_NODE(&cl->hnode);
 705	h = qdisc_class_hash(cl->classid, clhash->hashmask);
 706	hlist_add_head(&cl->hnode, &clhash->hash[h]);
 707	clhash->hashelems++;
 708}
 709EXPORT_SYMBOL(qdisc_class_hash_insert);
 710
 711void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
 712			     struct Qdisc_class_common *cl)
 713{
 714	hlist_del(&cl->hnode);
 715	clhash->hashelems--;
 716}
 717EXPORT_SYMBOL(qdisc_class_hash_remove);
 718
  719/* Allocate a unique handle from the space managed by the kernel.
  720 * The possible range is [8000-FFFF]:0000 (0x8000 values).
  721 */
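/* For example (illustrative, not part of the original source): autohandle
 * starts at 0x80000000 and is bumped by 0x10000 before each lookup, so the
 * first automatically allocated handle is 8001:0, the next 8002:0, and so
 * on, skipping any value already taken by an explicit "handle X:" request.
 */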
 722static u32 qdisc_alloc_handle(struct net_device *dev)
 723{
 724	int i = 0x8000;
 725	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 726
 727	do {
 728		autohandle += TC_H_MAKE(0x10000U, 0);
 729		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
 730			autohandle = TC_H_MAKE(0x80000000U, 0);
 731		if (!qdisc_lookup(dev, autohandle))
 732			return autohandle;
 733		cond_resched();
 734	} while	(--i > 0);
 735
 736	return 0;
 737}
 738
 739void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
 740			       unsigned int len)
 741{
 742	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
 743	const struct Qdisc_class_ops *cops;
 744	unsigned long cl;
 745	u32 parentid;
 746	bool notify;
 747	int drops;
 748
 749	if (n == 0 && len == 0)
 750		return;
 751	drops = max_t(int, n, 0);
 752	rcu_read_lock();
 753	while ((parentid = sch->parent)) {
 754		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 755			break;
 756
 757		if (sch->flags & TCQ_F_NOPARENT)
 758			break;
  759		/* Notify the parent qdisc only if the child qdisc becomes empty.
  760		 *
  761		 * If the child was empty even before the update, then the backlog
  762		 * counter is inconsistent and we skip the notification because
  763		 * the parent class is already passive.
  764		 *
  765		 * If the original child was offloaded, then it is allowed
  766		 * to be seen as empty, so the parent is notified anyway.
  767		 */
 768		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
 769						       !qdisc_is_offloaded);
 770		/* TODO: perform the search on a per txq basis */
 771		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 772		if (sch == NULL) {
 773			WARN_ON_ONCE(parentid != TC_H_ROOT);
 774			break;
 775		}
 776		cops = sch->ops->cl_ops;
 777		if (notify && cops->qlen_notify) {
 778			cl = cops->find(sch, parentid);
 779			cops->qlen_notify(sch, cl);
 780		}
 781		sch->q.qlen -= n;
 782		sch->qstats.backlog -= len;
 783		__qdisc_qstats_drop(sch, drops);
 784	}
 785	rcu_read_unlock();
 786}
 787EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
 788
 789static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 790			 u32 portid, u32 seq, u16 flags, int event)
 791{
 792	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
 793	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 794	struct tcmsg *tcm;
 795	struct nlmsghdr  *nlh;
 796	unsigned char *b = skb_tail_pointer(skb);
 797	struct gnet_dump d;
 798	struct qdisc_size_table *stab;
 799	u32 block_index;
 800	__u32 qlen;
 801
 802	cond_resched();
 803	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
 804	if (!nlh)
 805		goto out_nlmsg_trim;
 806	tcm = nlmsg_data(nlh);
 807	tcm->tcm_family = AF_UNSPEC;
 808	tcm->tcm__pad1 = 0;
 809	tcm->tcm__pad2 = 0;
 810	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 811	tcm->tcm_parent = clid;
 812	tcm->tcm_handle = q->handle;
 813	tcm->tcm_info = refcount_read(&q->refcnt);
 814	if (nla_put_string(skb, TCA_KIND, q->ops->id))
 815		goto nla_put_failure;
 816	if (q->ops->ingress_block_get) {
 817		block_index = q->ops->ingress_block_get(q);
 818		if (block_index &&
 819		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
 820			goto nla_put_failure;
 821	}
 822	if (q->ops->egress_block_get) {
 823		block_index = q->ops->egress_block_get(q);
 824		if (block_index &&
 825		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
 826			goto nla_put_failure;
 827	}
 828	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 829		goto nla_put_failure;
 830	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
 831		goto nla_put_failure;
 832	qlen = qdisc_qlen_sum(q);
 833
 834	stab = rtnl_dereference(q->stab);
 835	if (stab && qdisc_dump_stab(skb, stab) < 0)
 836		goto nla_put_failure;
 837
 838	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
 839					 NULL, &d, TCA_PAD) < 0)
 840		goto nla_put_failure;
 841
 842	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
 843		goto nla_put_failure;
 844
 845	if (qdisc_is_percpu_stats(q)) {
 846		cpu_bstats = q->cpu_bstats;
 847		cpu_qstats = q->cpu_qstats;
 848	}
 849
 850	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
 851				  &d, cpu_bstats, &q->bstats) < 0 ||
 852	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 853	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
 854		goto nla_put_failure;
 855
 856	if (gnet_stats_finish_copy(&d) < 0)
 857		goto nla_put_failure;
 858
 859	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 860	return skb->len;
 861
 862out_nlmsg_trim:
 863nla_put_failure:
 864	nlmsg_trim(skb, b);
 865	return -1;
 866}
 867
 868static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
 869{
 870	if (q->flags & TCQ_F_BUILTIN)
 871		return true;
 872	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
 873		return true;
 874
 875	return false;
 876}
 877
 878static int qdisc_notify(struct net *net, struct sk_buff *oskb,
 879			struct nlmsghdr *n, u32 clid,
 880			struct Qdisc *old, struct Qdisc *new)
 881{
 882	struct sk_buff *skb;
 883	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 884
 885	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 886	if (!skb)
 887		return -ENOBUFS;
 888
 889	if (old && !tc_qdisc_dump_ignore(old, false)) {
 890		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
 891				  0, RTM_DELQDISC) < 0)
 892			goto err_out;
 893	}
 894	if (new && !tc_qdisc_dump_ignore(new, false)) {
 895		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
 896				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
 897			goto err_out;
 898	}
 899
 900	if (skb->len)
 901		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
 902				      n->nlmsg_flags & NLM_F_ECHO);
 903
 904err_out:
 905	kfree_skb(skb);
 906	return -EINVAL;
 907}
 908
 909static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 910			       struct nlmsghdr *n, u32 clid,
 911			       struct Qdisc *old, struct Qdisc *new)
 912{
 913	if (new || old)
 914		qdisc_notify(net, skb, n, clid, old, new);
 915
 916	if (old)
 917		qdisc_destroy(old);
 918}
 919
  920/* Graft qdisc "new" to class "classid" of qdisc "parent" or
  921 * to device "dev".
  922 *
  923 * When appropriate, send a netlink notification using "skb"
  924 * and "n".
  925 *
  926 * On success, destroy the old qdisc.
  927 */
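/* For instance (illustrative, not guaranteed by this file): a request such
 * as "tc qdisc replace dev eth0 root handle 1: htb" reaches this function
 * with parent == NULL and classid == TC_H_ROOT, so the new qdisc is grafted
 * onto every TX queue of the device, while
 * "tc qdisc add dev eth0 parent 1:10 sfq" arrives with "parent" set to the
 * htb qdisc and is delegated to its ->graft() class operation below.
 */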
 928
 929static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 930		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
 931		       struct Qdisc *new, struct Qdisc *old,
 932		       struct netlink_ext_ack *extack)
 933{
 934	struct Qdisc *q = old;
 935	struct net *net = dev_net(dev);
 936	int err = 0;
 937
 938	if (parent == NULL) {
 939		unsigned int i, num_q, ingress;
 940
 941		ingress = 0;
 942		num_q = dev->num_tx_queues;
 943		if ((q && q->flags & TCQ_F_INGRESS) ||
 944		    (new && new->flags & TCQ_F_INGRESS)) {
 945			num_q = 1;
 946			ingress = 1;
 947			if (!dev_ingress_queue(dev)) {
 948				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
 949				return -ENOENT;
 950			}
 951		}
 952
 953		if (dev->flags & IFF_UP)
 954			dev_deactivate(dev);
 955
 956		if (new && new->ops->attach)
 957			goto skip;
 958
 959		for (i = 0; i < num_q; i++) {
 960			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
 961
 962			if (!ingress)
 963				dev_queue = netdev_get_tx_queue(dev, i);
 964
 965			old = dev_graft_qdisc(dev_queue, new);
 966			if (new && i > 0)
 967				qdisc_refcount_inc(new);
 968
 969			if (!ingress)
 970				qdisc_destroy(old);
 971		}
 972
 973skip:
 974		if (!ingress) {
 975			notify_and_destroy(net, skb, n, classid,
 976					   dev->qdisc, new);
 977			if (new && !new->ops->attach)
 978				qdisc_refcount_inc(new);
 979			dev->qdisc = new ? : &noop_qdisc;
 980
 981			if (new && new->ops->attach)
 982				new->ops->attach(new);
 983		} else {
 984			notify_and_destroy(net, skb, n, classid, old, new);
 985		}
 986
 987		if (dev->flags & IFF_UP)
 988			dev_activate(dev);
 989	} else {
 990		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 991
 992		/* Only support running class lockless if parent is lockless */
 993		if (new && (new->flags & TCQ_F_NOLOCK) &&
 994		    parent && !(parent->flags & TCQ_F_NOLOCK))
 995			new->flags &= ~TCQ_F_NOLOCK;
 996
 997		err = -EOPNOTSUPP;
 998		if (cops && cops->graft) {
 999			unsigned long cl = cops->find(parent, classid);
1000
1001			if (cl) {
1002				err = cops->graft(parent, cl, new, &old,
1003						  extack);
1004			} else {
1005				NL_SET_ERR_MSG(extack, "Specified class not found");
1006				err = -ENOENT;
1007			}
1008		}
1009		if (!err)
1010			notify_and_destroy(net, skb, n, classid, old, new);
1011	}
1012	return err;
1013}
1014
1015static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1016				   struct netlink_ext_ack *extack)
1017{
1018	u32 block_index;
1019
1020	if (tca[TCA_INGRESS_BLOCK]) {
1021		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1022
1023		if (!block_index) {
1024			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1025			return -EINVAL;
1026		}
1027		if (!sch->ops->ingress_block_set) {
1028			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1029			return -EOPNOTSUPP;
1030		}
1031		sch->ops->ingress_block_set(sch, block_index);
1032	}
1033	if (tca[TCA_EGRESS_BLOCK]) {
1034		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1035
1036		if (!block_index) {
1037			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1038			return -EINVAL;
1039		}
1040		if (!sch->ops->egress_block_set) {
1041			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1042			return -EOPNOTSUPP;
1043		}
1044		sch->ops->egress_block_set(sch, block_index);
1045	}
1046	return 0;
1047}
1048
1049/* lockdep annotation is needed for ingress; egress gets it only for name */
1050static struct lock_class_key qdisc_tx_lock;
1051static struct lock_class_key qdisc_rx_lock;
1052
1053/*
1054   Allocate and initialize new qdisc.
1055
1056   Parameters are passed via opt.
1057 */
1058
1059static struct Qdisc *qdisc_create(struct net_device *dev,
1060				  struct netdev_queue *dev_queue,
1061				  struct Qdisc *p, u32 parent, u32 handle,
1062				  struct nlattr **tca, int *errp,
1063				  struct netlink_ext_ack *extack)
1064{
1065	int err;
1066	struct nlattr *kind = tca[TCA_KIND];
1067	struct Qdisc *sch;
1068	struct Qdisc_ops *ops;
1069	struct qdisc_size_table *stab;
1070
1071	ops = qdisc_lookup_ops(kind);
1072#ifdef CONFIG_MODULES
1073	if (ops == NULL && kind != NULL) {
1074		char name[IFNAMSIZ];
1075		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
 1076			/* We dropped the RTNL semaphore in order to
 1077			 * perform the module load.  So, even if we
 1078			 * succeeded in loading the module, we have to
 1079			 * tell the caller to replay the request.  We
 1080			 * indicate this using -EAGAIN.
 1081			 * We replay the request because the device may
 1082			 * go away in the meantime.
 1083			 */
1084			rtnl_unlock();
1085			request_module("sch_%s", name);
1086			rtnl_lock();
1087			ops = qdisc_lookup_ops(kind);
1088			if (ops != NULL) {
 1089			/* We will look the ops up again after the
 1090				 * replay, so don't keep a reference.
 1091				 */
1092				module_put(ops->owner);
1093				err = -EAGAIN;
1094				goto err_out;
1095			}
1096		}
1097	}
1098#endif
1099
1100	err = -ENOENT;
1101	if (!ops) {
1102		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1103		goto err_out;
1104	}
1105
1106	sch = qdisc_alloc(dev_queue, ops, extack);
1107	if (IS_ERR(sch)) {
1108		err = PTR_ERR(sch);
1109		goto err_out2;
1110	}
1111
1112	sch->parent = parent;
1113
1114	if (handle == TC_H_INGRESS) {
1115		sch->flags |= TCQ_F_INGRESS;
1116		handle = TC_H_MAKE(TC_H_INGRESS, 0);
1117		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
1118	} else {
1119		if (handle == 0) {
1120			handle = qdisc_alloc_handle(dev);
1121			err = -ENOMEM;
1122			if (handle == 0)
1123				goto err_out3;
1124		}
1125		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
1126		if (!netif_is_multiqueue(dev))
1127			sch->flags |= TCQ_F_ONETXQUEUE;
1128	}
1129
1130	sch->handle = handle;
1131
 1132	/* This exists to stay backward compatible with a userspace
 1133	 * loophole that allowed userspace to get the IFF_NO_QUEUE
 1134	 * facility on older kernels by setting tx_queue_len=0 (prior
 1135	 * to qdisc init), and then forgetting to reinit tx_queue_len
 1136	 * before attaching a qdisc again.
 1137	 */
1138	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1139		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1140		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1141	}
1142
1143	err = qdisc_block_indexes_set(sch, tca, extack);
1144	if (err)
1145		goto err_out3;
1146
1147	if (ops->init) {
1148		err = ops->init(sch, tca[TCA_OPTIONS], extack);
1149		if (err != 0)
1150			goto err_out5;
1151	}
1152
1153	if (tca[TCA_STAB]) {
1154		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1155		if (IS_ERR(stab)) {
1156			err = PTR_ERR(stab);
1157			goto err_out4;
1158		}
1159		rcu_assign_pointer(sch->stab, stab);
1160	}
1161	if (tca[TCA_RATE]) {
1162		seqcount_t *running;
1163
1164		err = -EOPNOTSUPP;
1165		if (sch->flags & TCQ_F_MQROOT) {
1166			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1167			goto err_out4;
1168		}
1169
1170		if (sch->parent != TC_H_ROOT &&
1171		    !(sch->flags & TCQ_F_INGRESS) &&
1172		    (!p || !(p->flags & TCQ_F_MQROOT)))
1173			running = qdisc_root_sleeping_running(sch);
1174		else
1175			running = &sch->running;
1176
1177		err = gen_new_estimator(&sch->bstats,
1178					sch->cpu_bstats,
1179					&sch->rate_est,
1180					NULL,
1181					running,
1182					tca[TCA_RATE]);
1183		if (err) {
1184			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1185			goto err_out4;
1186		}
1187	}
1188
1189	qdisc_hash_add(sch, false);
1190
1191	return sch;
1192
1193err_out5:
1194	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1195	if (ops->destroy)
1196		ops->destroy(sch);
1197err_out3:
1198	dev_put(dev);
1199	qdisc_free(sch);
1200err_out2:
1201	module_put(ops->owner);
1202err_out:
1203	*errp = err;
1204	return NULL;
1205
1206err_out4:
 1207	/*
 1208	 * Any broken qdiscs that would require an ops->reset() here?
 1209	 * The qdisc was never in action, so it shouldn't be necessary.
 1210	 */
1211	qdisc_put_stab(rtnl_dereference(sch->stab));
1212	if (ops->destroy)
1213		ops->destroy(sch);
1214	goto err_out3;
1215}
1216
1217static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1218			struct netlink_ext_ack *extack)
1219{
1220	struct qdisc_size_table *ostab, *stab = NULL;
1221	int err = 0;
1222
1223	if (tca[TCA_OPTIONS]) {
1224		if (!sch->ops->change) {
1225			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1226			return -EINVAL;
1227		}
1228		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1229			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1230			return -EOPNOTSUPP;
1231		}
1232		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1233		if (err)
1234			return err;
1235	}
1236
1237	if (tca[TCA_STAB]) {
1238		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1239		if (IS_ERR(stab))
1240			return PTR_ERR(stab);
1241	}
1242
1243	ostab = rtnl_dereference(sch->stab);
1244	rcu_assign_pointer(sch->stab, stab);
1245	qdisc_put_stab(ostab);
1246
1247	if (tca[TCA_RATE]) {
1248		/* NB: ignores errors from replace_estimator
1249		   because change can't be undone. */
1250		if (sch->flags & TCQ_F_MQROOT)
1251			goto out;
1252		gen_replace_estimator(&sch->bstats,
1253				      sch->cpu_bstats,
1254				      &sch->rate_est,
1255				      NULL,
1256				      qdisc_root_sleeping_running(sch),
1257				      tca[TCA_RATE]);
1258	}
1259out:
1260	return 0;
1261}
1262
1263struct check_loop_arg {
1264	struct qdisc_walker	w;
1265	struct Qdisc		*p;
1266	int			depth;
1267};
1268
1269static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1270			 struct qdisc_walker *w);
1271
1272static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1273{
1274	struct check_loop_arg	arg;
1275
1276	if (q->ops->cl_ops == NULL)
1277		return 0;
1278
1279	arg.w.stop = arg.w.skip = arg.w.count = 0;
1280	arg.w.fn = check_loop_fn;
1281	arg.depth = depth;
1282	arg.p = p;
1283	q->ops->cl_ops->walk(q, &arg.w);
1284	return arg.w.stop ? -ELOOP : 0;
1285}
1286
1287static int
1288check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1289{
1290	struct Qdisc *leaf;
1291	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1292	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1293
1294	leaf = cops->leaf(q, cl);
1295	if (leaf) {
1296		if (leaf == arg->p || arg->depth > 7)
1297			return -ELOOP;
1298		return check_loop(leaf, arg->p, arg->depth + 1);
1299	}
1300	return 0;
1301}
1302
1303/*
1304 * Delete/get qdisc.
1305 */
1306
1307static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1308			struct netlink_ext_ack *extack)
1309{
1310	struct net *net = sock_net(skb->sk);
1311	struct tcmsg *tcm = nlmsg_data(n);
1312	struct nlattr *tca[TCA_MAX + 1];
1313	struct net_device *dev;
1314	u32 clid;
1315	struct Qdisc *q = NULL;
1316	struct Qdisc *p = NULL;
1317	int err;
1318
1319	if ((n->nlmsg_type != RTM_GETQDISC) &&
1320	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1321		return -EPERM;
1322
1323	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1324	if (err < 0)
1325		return err;
1326
1327	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1328	if (!dev)
1329		return -ENODEV;
1330
1331	clid = tcm->tcm_parent;
1332	if (clid) {
1333		if (clid != TC_H_ROOT) {
1334			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1335				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1336				if (!p) {
1337					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1338					return -ENOENT;
1339				}
1340				q = qdisc_leaf(p, clid);
1341			} else if (dev_ingress_queue(dev)) {
1342				q = dev_ingress_queue(dev)->qdisc_sleeping;
1343			}
1344		} else {
1345			q = dev->qdisc;
1346		}
1347		if (!q) {
1348			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1349			return -ENOENT;
1350		}
1351
1352		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1353			NL_SET_ERR_MSG(extack, "Invalid handle");
1354			return -EINVAL;
1355		}
1356	} else {
1357		q = qdisc_lookup(dev, tcm->tcm_handle);
1358		if (!q) {
1359			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1360			return -ENOENT;
1361		}
1362	}
1363
1364	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1365		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1366		return -EINVAL;
1367	}
1368
1369	if (n->nlmsg_type == RTM_DELQDISC) {
1370		if (!clid) {
1371			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1372			return -EINVAL;
1373		}
1374		if (q->handle == 0) {
1375			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1376			return -ENOENT;
1377		}
1378		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1379		if (err != 0)
1380			return err;
1381	} else {
1382		qdisc_notify(net, skb, n, clid, NULL, q);
1383	}
1384	return 0;
1385}
1386
1387/*
1388 * Create/change qdisc.
1389 */
1390
1391static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1392			   struct netlink_ext_ack *extack)
1393{
1394	struct net *net = sock_net(skb->sk);
1395	struct tcmsg *tcm;
1396	struct nlattr *tca[TCA_MAX + 1];
1397	struct net_device *dev;
1398	u32 clid;
1399	struct Qdisc *q, *p;
1400	int err;
1401
1402	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1403		return -EPERM;
1404
1405replay:
1406	/* Reinit, just in case something touches this. */
1407	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1408	if (err < 0)
1409		return err;
1410
1411	tcm = nlmsg_data(n);
1412	clid = tcm->tcm_parent;
1413	q = p = NULL;
1414
1415	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1416	if (!dev)
1417		return -ENODEV;
1418
1419
1420	if (clid) {
1421		if (clid != TC_H_ROOT) {
1422			if (clid != TC_H_INGRESS) {
1423				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1424				if (!p) {
1425					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1426					return -ENOENT;
1427				}
1428				q = qdisc_leaf(p, clid);
1429			} else if (dev_ingress_queue_create(dev)) {
1430				q = dev_ingress_queue(dev)->qdisc_sleeping;
1431			}
1432		} else {
1433			q = dev->qdisc;
1434		}
1435
 1436		/* It may be the default qdisc; ignore it */
1437		if (q && q->handle == 0)
1438			q = NULL;
1439
1440		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1441			if (tcm->tcm_handle) {
1442				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1443					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1444					return -EEXIST;
1445				}
1446				if (TC_H_MIN(tcm->tcm_handle)) {
1447					NL_SET_ERR_MSG(extack, "Invalid minor handle");
1448					return -EINVAL;
1449				}
1450				q = qdisc_lookup(dev, tcm->tcm_handle);
1451				if (!q)
1452					goto create_n_graft;
1453				if (n->nlmsg_flags & NLM_F_EXCL) {
1454					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1455					return -EEXIST;
1456				}
1457				if (tca[TCA_KIND] &&
1458				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1459					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1460					return -EINVAL;
1461				}
1462				if (q == p ||
1463				    (p && check_loop(q, p, 0))) {
1464					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1465					return -ELOOP;
1466				}
1467				qdisc_refcount_inc(q);
1468				goto graft;
1469			} else {
1470				if (!q)
1471					goto create_n_graft;
1472
 1473				/* This magic test requires explanation.
 1474				 *
 1475				 *   We know that some child q is already
 1476				 *   attached to this parent and we have a choice:
 1477				 *   either change it or create/graft a new one.
 1478				 *
 1479				 *   1. We are allowed to create/graft only
 1480				 *   if the CREATE and REPLACE flags are set.
 1481				 *
 1482				 *   2. If EXCL is set, the requestor wanted to say
 1483				 *   that the qdisc tcm_handle is not expected
 1484				 *   to exist, so we choose create/graft too.
 1485				 *
 1486				 *   3. The last case is when no flags are set.
 1487				 *   Alas, this is a sort of hole in the API; we
 1488				 *   cannot decide what to do unambiguously.
 1489				 *   For now we select create/graft if the
 1490				 *   user gave a KIND which does not match the existing one.
 1491				 */
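				/* As a rough illustration (based on the flags
				 * iproute2 typically sets, not something this
				 * file relies on): "tc qdisc replace" sends
				 * NLM_F_CREATE|NLM_F_REPLACE and thus takes
				 * the create/graft path when the requested
				 * KIND differs from the existing child (or
				 * NLM_F_EXCL is also set); "tc qdisc change"
				 * sends none of these flags and falls through
				 * to the parameter-change path below; and
				 * "tc qdisc add" sends NLM_F_CREATE|NLM_F_EXCL
				 * and, with a child already attached, is
				 * rejected with -EEXIST further down.
				 */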
1492				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1493				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1494				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1495				     (tca[TCA_KIND] &&
1496				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1497					goto create_n_graft;
1498			}
1499		}
1500	} else {
1501		if (!tcm->tcm_handle) {
1502			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1503			return -EINVAL;
1504		}
1505		q = qdisc_lookup(dev, tcm->tcm_handle);
1506	}
1507
1508	/* Change qdisc parameters */
1509	if (!q) {
1510		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1511		return -ENOENT;
1512	}
1513	if (n->nlmsg_flags & NLM_F_EXCL) {
1514		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1515		return -EEXIST;
1516	}
1517	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1518		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1519		return -EINVAL;
1520	}
1521	err = qdisc_change(q, tca, extack);
1522	if (err == 0)
1523		qdisc_notify(net, skb, n, clid, NULL, q);
1524	return err;
1525
1526create_n_graft:
1527	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1528		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1529		return -ENOENT;
1530	}
1531	if (clid == TC_H_INGRESS) {
1532		if (dev_ingress_queue(dev)) {
1533			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1534					 tcm->tcm_parent, tcm->tcm_parent,
1535					 tca, &err, extack);
1536		} else {
1537			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1538			err = -ENOENT;
1539		}
1540	} else {
1541		struct netdev_queue *dev_queue;
1542
1543		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1544			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1545		else if (p)
1546			dev_queue = p->dev_queue;
1547		else
1548			dev_queue = netdev_get_tx_queue(dev, 0);
1549
1550		q = qdisc_create(dev, dev_queue, p,
1551				 tcm->tcm_parent, tcm->tcm_handle,
1552				 tca, &err, extack);
1553	}
1554	if (q == NULL) {
1555		if (err == -EAGAIN)
1556			goto replay;
1557		return err;
1558	}
1559
1560graft:
1561	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1562	if (err) {
1563		if (q)
1564			qdisc_destroy(q);
1565		return err;
1566	}
1567
1568	return 0;
1569}
1570
1571static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1572			      struct netlink_callback *cb,
1573			      int *q_idx_p, int s_q_idx, bool recur,
1574			      bool dump_invisible)
1575{
1576	int ret = 0, q_idx = *q_idx_p;
1577	struct Qdisc *q;
1578	int b;
1579
1580	if (!root)
1581		return 0;
1582
1583	q = root;
1584	if (q_idx < s_q_idx) {
1585		q_idx++;
1586	} else {
1587		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1588		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1589				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1590				  RTM_NEWQDISC) <= 0)
1591			goto done;
1592		q_idx++;
1593	}
1594
1595	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
1596	 * itself has already been dumped.
1597	 *
1598	 * If we've already dumped the top-level (ingress) qdisc above and the global
1599	 * qdisc hashtable, we don't want to hit it again
1600	 */
1601	if (!qdisc_dev(root) || !recur)
1602		goto out;
1603
1604	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1605		if (q_idx < s_q_idx) {
1606			q_idx++;
1607			continue;
1608		}
1609		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1610		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1611				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1612				  RTM_NEWQDISC) <= 0)
1613			goto done;
1614		q_idx++;
1615	}
1616
1617out:
1618	*q_idx_p = q_idx;
1619	return ret;
1620done:
1621	ret = -1;
1622	goto out;
1623}
1624
1625static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1626{
1627	struct net *net = sock_net(skb->sk);
1628	int idx, q_idx;
1629	int s_idx, s_q_idx;
1630	struct net_device *dev;
1631	const struct nlmsghdr *nlh = cb->nlh;
1632	struct nlattr *tca[TCA_MAX + 1];
1633	int err;
1634
1635	s_idx = cb->args[0];
1636	s_q_idx = q_idx = cb->args[1];
1637
1638	idx = 0;
1639	ASSERT_RTNL();
1640
1641	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
1642	if (err < 0)
1643		return err;
1644
1645	for_each_netdev(net, dev) {
1646		struct netdev_queue *dev_queue;
1647
1648		if (idx < s_idx)
1649			goto cont;
1650		if (idx > s_idx)
1651			s_q_idx = 0;
1652		q_idx = 0;
1653
1654		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1655				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
1656			goto done;
1657
1658		dev_queue = dev_ingress_queue(dev);
1659		if (dev_queue &&
1660		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1661				       &q_idx, s_q_idx, false,
1662				       tca[TCA_DUMP_INVISIBLE]) < 0)
1663			goto done;
1664
1665cont:
1666		idx++;
1667	}
1668
1669done:
1670	cb->args[0] = idx;
1671	cb->args[1] = q_idx;
1672
1673	return skb->len;
1674}
1675
1676
1677
1678/************************************************
1679 *	Traffic classes manipulation.		*
1680 ************************************************/
1681
1682static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1683			  unsigned long cl,
1684			  u32 portid, u32 seq, u16 flags, int event)
1685{
1686	struct tcmsg *tcm;
1687	struct nlmsghdr  *nlh;
1688	unsigned char *b = skb_tail_pointer(skb);
1689	struct gnet_dump d;
1690	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1691
1692	cond_resched();
1693	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1694	if (!nlh)
1695		goto out_nlmsg_trim;
1696	tcm = nlmsg_data(nlh);
1697	tcm->tcm_family = AF_UNSPEC;
1698	tcm->tcm__pad1 = 0;
1699	tcm->tcm__pad2 = 0;
1700	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1701	tcm->tcm_parent = q->handle;
1702	tcm->tcm_handle = q->handle;
1703	tcm->tcm_info = 0;
1704	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1705		goto nla_put_failure;
1706	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1707		goto nla_put_failure;
1708
1709	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1710					 NULL, &d, TCA_PAD) < 0)
1711		goto nla_put_failure;
1712
1713	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1714		goto nla_put_failure;
1715
1716	if (gnet_stats_finish_copy(&d) < 0)
1717		goto nla_put_failure;
1718
1719	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1720	return skb->len;
1721
1722out_nlmsg_trim:
1723nla_put_failure:
1724	nlmsg_trim(skb, b);
1725	return -1;
1726}
1727
1728static int tclass_notify(struct net *net, struct sk_buff *oskb,
1729			 struct nlmsghdr *n, struct Qdisc *q,
1730			 unsigned long cl, int event)
1731{
1732	struct sk_buff *skb;
1733	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1734
1735	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1736	if (!skb)
1737		return -ENOBUFS;
1738
1739	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1740		kfree_skb(skb);
1741		return -EINVAL;
1742	}
1743
1744	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1745			      n->nlmsg_flags & NLM_F_ECHO);
1746}
1747
1748static int tclass_del_notify(struct net *net,
1749			     const struct Qdisc_class_ops *cops,
1750			     struct sk_buff *oskb, struct nlmsghdr *n,
1751			     struct Qdisc *q, unsigned long cl)
1752{
1753	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1754	struct sk_buff *skb;
1755	int err = 0;
1756
1757	if (!cops->delete)
1758		return -EOPNOTSUPP;
1759
1760	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1761	if (!skb)
1762		return -ENOBUFS;
1763
1764	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1765			   RTM_DELTCLASS) < 0) {
1766		kfree_skb(skb);
1767		return -EINVAL;
1768	}
1769
1770	err = cops->delete(q, cl);
1771	if (err) {
1772		kfree_skb(skb);
1773		return err;
1774	}
1775
1776	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1777			      n->nlmsg_flags & NLM_F_ECHO);
1778}
1779
1780#ifdef CONFIG_NET_CLS
1781
1782struct tcf_bind_args {
1783	struct tcf_walker w;
1784	u32 classid;
1785	unsigned long cl;
1786};
1787
1788static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1789{
1790	struct tcf_bind_args *a = (void *)arg;
1791
1792	if (tp->ops->bind_class) {
1793		struct Qdisc *q = tcf_block_q(tp->chain->block);
1794
1795		sch_tree_lock(q);
1796		tp->ops->bind_class(n, a->classid, a->cl);
1797		sch_tree_unlock(q);
1798	}
1799	return 0;
1800}
1801
1802static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1803			   unsigned long new_cl)
1804{
1805	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1806	struct tcf_block *block;
1807	struct tcf_chain *chain;
1808	unsigned long cl;
1809
1810	cl = cops->find(q, portid);
1811	if (!cl)
1812		return;
1813	block = cops->tcf_block(q, cl, NULL);
1814	if (!block)
1815		return;
1816	list_for_each_entry(chain, &block->chain_list, list) {
1817		struct tcf_proto *tp;
1818
1819		for (tp = rtnl_dereference(chain->filter_chain);
1820		     tp; tp = rtnl_dereference(tp->next)) {
1821			struct tcf_bind_args arg = {};
1822
1823			arg.w.fn = tcf_node_bind;
1824			arg.classid = clid;
1825			arg.cl = new_cl;
1826			tp->ops->walk(tp, &arg.w);
1827		}
1828	}
1829}
1830
1831#else
1832
1833static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1834			   unsigned long new_cl)
1835{
1836}
1837
1838#endif
1839
1840static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1841			 struct netlink_ext_ack *extack)
1842{
1843	struct net *net = sock_net(skb->sk);
1844	struct tcmsg *tcm = nlmsg_data(n);
1845	struct nlattr *tca[TCA_MAX + 1];
1846	struct net_device *dev;
1847	struct Qdisc *q = NULL;
1848	const struct Qdisc_class_ops *cops;
1849	unsigned long cl = 0;
1850	unsigned long new_cl;
1851	u32 portid;
1852	u32 clid;
1853	u32 qid;
1854	int err;
1855
1856	if ((n->nlmsg_type != RTM_GETTCLASS) &&
1857	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1858		return -EPERM;
1859
1860	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1861	if (err < 0)
1862		return err;
1863
1864	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1865	if (!dev)
1866		return -ENODEV;
1867
1868	/*
1869	   parent == TC_H_UNSPEC - unspecified parent.
1870	   parent == TC_H_ROOT   - class is root, which has no parent.
1871	   parent == X:0	 - parent is root class.
1872	   parent == X:Y	 - parent is a node in hierarchy.
1873	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
1874
1875	   handle == 0:0	 - generate handle from kernel pool.
1876	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
1877	   handle == X:Y	 - clear.
1878	   handle == X:0	 - root class.
1879	 */
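	/* Illustrative example (not part of the original source): for
	 * "tc class add dev eth0 parent 1: classid 1:10 ...", userspace fills
	 * in tcm_parent = 1:0 and tcm_handle = 1:10.  Below, qid resolves to
	 * 1:0, the qdisc is found via qdisc_lookup(dev, 1:0), and clid becomes
	 * the full class handle 1:10 that is passed to cops->find().
	 */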
1880
1881	/* Step 1. Determine qdisc handle X:0 */
1882
1883	portid = tcm->tcm_parent;
1884	clid = tcm->tcm_handle;
1885	qid = TC_H_MAJ(clid);
1886
1887	if (portid != TC_H_ROOT) {
1888		u32 qid1 = TC_H_MAJ(portid);
1889
1890		if (qid && qid1) {
1891			/* If both majors are known, they must be identical. */
1892			if (qid != qid1)
1893				return -EINVAL;
1894		} else if (qid1) {
1895			qid = qid1;
1896		} else if (qid == 0)
1897			qid = dev->qdisc->handle;
1898
 1899		/* Now qid is a genuine qdisc handle consistent
 1900		 * with both parent and child.
 1901		 *
 1902		 * TC_H_MAJ(portid) may still be unspecified; complete it now.
 1903		 */
1904		if (portid)
1905			portid = TC_H_MAKE(qid, portid);
1906	} else {
1907		if (qid == 0)
1908			qid = dev->qdisc->handle;
1909	}
1910
1911	/* OK. Locate qdisc */
1912	q = qdisc_lookup(dev, qid);
1913	if (!q)
1914		return -ENOENT;
1915
 1916	/* And check that it supports classes */
1917	cops = q->ops->cl_ops;
1918	if (cops == NULL)
1919		return -EINVAL;
1920
1921	/* Now try to get class */
1922	if (clid == 0) {
1923		if (portid == TC_H_ROOT)
1924			clid = qid;
1925	} else
1926		clid = TC_H_MAKE(qid, clid);
1927
1928	if (clid)
1929		cl = cops->find(q, clid);
1930
1931	if (cl == 0) {
1932		err = -ENOENT;
1933		if (n->nlmsg_type != RTM_NEWTCLASS ||
1934		    !(n->nlmsg_flags & NLM_F_CREATE))
1935			goto out;
1936	} else {
1937		switch (n->nlmsg_type) {
1938		case RTM_NEWTCLASS:
1939			err = -EEXIST;
1940			if (n->nlmsg_flags & NLM_F_EXCL)
1941				goto out;
1942			break;
1943		case RTM_DELTCLASS:
1944			err = tclass_del_notify(net, cops, skb, n, q, cl);
 1945			/* Unbind the class's filters by rebinding them to class 0 */
1946			tc_bind_tclass(q, portid, clid, 0);
1947			goto out;
1948		case RTM_GETTCLASS:
1949			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1950			goto out;
1951		default:
1952			err = -EINVAL;
1953			goto out;
1954		}
1955	}
1956
1957	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1958		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
1959		return -EOPNOTSUPP;
1960	}
1961
1962	new_cl = cl;
1963	err = -EOPNOTSUPP;
1964	if (cops->change)
1965		err = cops->change(q, clid, portid, tca, &new_cl, extack);
1966	if (err == 0) {
1967		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
 1968		/* We just created a new class; we need to do the reverse binding. */
1969		if (cl != new_cl)
1970			tc_bind_tclass(q, portid, clid, new_cl);
1971	}
1972out:
1973	return err;
1974}
1975
1976struct qdisc_dump_args {
1977	struct qdisc_walker	w;
1978	struct sk_buff		*skb;
1979	struct netlink_callback	*cb;
1980};
1981
1982static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
1983			    struct qdisc_walker *arg)
1984{
1985	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1986
1987	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1988			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1989			      RTM_NEWTCLASS);
1990}
1991
1992static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1993				struct tcmsg *tcm, struct netlink_callback *cb,
1994				int *t_p, int s_t)
1995{
1996	struct qdisc_dump_args arg;
1997
1998	if (tc_qdisc_dump_ignore(q, false) ||
1999	    *t_p < s_t || !q->ops->cl_ops ||
2000	    (tcm->tcm_parent &&
2001	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2002		(*t_p)++;
2003		return 0;
2004	}
2005	if (*t_p > s_t)
2006		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2007	arg.w.fn = qdisc_class_dump;
2008	arg.skb = skb;
2009	arg.cb = cb;
2010	arg.w.stop  = 0;
2011	arg.w.skip = cb->args[1];
2012	arg.w.count = 0;
2013	q->ops->cl_ops->walk(q, &arg.w);
2014	cb->args[1] = arg.w.count;
2015	if (arg.w.stop)
2016		return -1;
2017	(*t_p)++;
2018	return 0;
2019}
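/* The walk() contract relied upon above: the qdisc visits each of its
 * classes, skipping the first w->skip entries, incrementing w->count for
 * every class visited, and setting w->stop as soon as w->fn() returns a
 * negative value.  A minimal sketch of a qdisc-side implementation
 * (hypothetical; the "example_*" names are made up for illustration):
 *
 *	static void example_walk(struct Qdisc *sch, struct qdisc_walker *w)
 *	{
 *		struct example_class *cl;
 *
 *		if (w->stop)
 *			return;
 *		list_for_each_entry(cl, &example_class_list(sch), list) {
 *			if (w->count < w->skip) {
 *				w->count++;
 *				continue;
 *			}
 *			if (w->fn(sch, (unsigned long)cl, w) < 0) {
 *				w->stop = 1;
 *				return;
 *			}
 *			w->count++;
 *		}
 *	}
 *
 * Storing arg.w.count back into cb->args[1] is what lets an interrupted
 * netlink dump resume from the right class on the next callback.
 */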
2020
2021static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2022			       struct tcmsg *tcm, struct netlink_callback *cb,
2023			       int *t_p, int s_t)
2024{
2025	struct Qdisc *q;
2026	int b;
2027
2028	if (!root)
2029		return 0;
2030
2031	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2032		return -1;
2033
2034	if (!qdisc_dev(root))
2035		return 0;
2036
2037	if (tcm->tcm_parent) {
2038		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2039		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2040			return -1;
2041		return 0;
2042	}
2043	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2044		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2045			return -1;
2046	}
2047
2048	return 0;
2049}
2050
2051static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2052{
2053	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2054	struct net *net = sock_net(skb->sk);
2055	struct netdev_queue *dev_queue;
2056	struct net_device *dev;
2057	int t, s_t;
2058
2059	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2060		return 0;
2061	dev = dev_get_by_index(net, tcm->tcm_ifindex);
2062	if (!dev)
2063		return 0;
2064
2065	s_t = cb->args[0];
2066	t = 0;
2067
2068	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
2069		goto done;
2070
2071	dev_queue = dev_ingress_queue(dev);
2072	if (dev_queue &&
2073	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2074				&t, s_t) < 0)
2075		goto done;
2076
2077done:
2078	cb->args[0] = t;
2079
2080	dev_put(dev);
2081	return skb->len;
2082}
2083
2084#ifdef CONFIG_PROC_FS
2085static int psched_show(struct seq_file *seq, void *v)
2086{
2087	seq_printf(seq, "%08x %08x %08x %08x\n",
2088		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2089		   1000000,
2090		   (u32)NSEC_PER_SEC / hrtimer_resolution);
2091
2092	return 0;
2093}
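/* Example output (values depend on configuration): with PSCHED_SHIFT == 6
 * and high-resolution timers (hrtimer_resolution == 1) this reads
 *
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. NSEC_PER_USEC (1000), PSCHED_TICKS2NS(1) (64 ns per psched tick),
 * the legacy 1000000 constant, and NSEC_PER_SEC / hrtimer_resolution.
 * iproute2 reads /proc/net/psched to convert between its time units and
 * kernel ticks.
 */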
2094
2095static int psched_open(struct inode *inode, struct file *file)
2096{
2097	return single_open(file, psched_show, NULL);
2098}
2099
2100static const struct file_operations psched_fops = {
2101	.open = psched_open,
2102	.read  = seq_read,
2103	.llseek = seq_lseek,
2104	.release = single_release,
2105};
2106
2107static int __net_init psched_net_init(struct net *net)
2108{
2109	struct proc_dir_entry *e;
2110
2111	e = proc_create("psched", 0, net->proc_net, &psched_fops);
2112	if (e == NULL)
2113		return -ENOMEM;
2114
2115	return 0;
2116}
2117
2118static void __net_exit psched_net_exit(struct net *net)
2119{
2120	remove_proc_entry("psched", net->proc_net);
2121}
2122#else
2123static int __net_init psched_net_init(struct net *net)
2124{
2125	return 0;
2126}
2127
2128static void __net_exit psched_net_exit(struct net *net)
2129{
2130}
2131#endif
2132
2133static struct pernet_operations psched_net_ops = {
2134	.init = psched_net_init,
2135	.exit = psched_net_exit,
2136};
2137
2138static int __init pktsched_init(void)
2139{
2140	int err;
2141
2142	err = register_pernet_subsys(&psched_net_ops);
2143	if (err) {
2144		pr_err("pktsched_init: "
2145		       "cannot initialize per netns operations\n");
2146		return err;
2147	}
2148
2149	register_qdisc(&pfifo_fast_ops);
2150	register_qdisc(&pfifo_qdisc_ops);
2151	register_qdisc(&bfifo_qdisc_ops);
2152	register_qdisc(&pfifo_head_drop_qdisc_ops);
2153	register_qdisc(&mq_qdisc_ops);
2154	register_qdisc(&noqueue_qdisc_ops);
2155
2156	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2157	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2158	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2159		      0);
2160	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2161	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2162	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2163		      0);
2164
2165	return 0;
2166}
2167
2168subsys_initcall(pktsched_init);