v3.1
 
   1/*
   2 * net/sched/sch_api.c	Packet scheduler API.
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10 *
  11 * Fixes:
  12 *
  13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
  14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
  15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/string.h>
  22#include <linux/errno.h>
  23#include <linux/skbuff.h>
  24#include <linux/init.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/kmod.h>
  28#include <linux/list.h>
  29#include <linux/hrtimer.h>
  30#include <linux/lockdep.h>
  31#include <linux/slab.h>
 
  32
  33#include <net/net_namespace.h>
  34#include <net/sock.h>
  35#include <net/netlink.h>
  36#include <net/pkt_sched.h>
 
  37
  38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
  39			struct nlmsghdr *n, u32 clid,
  40			struct Qdisc *old, struct Qdisc *new);
  41static int tclass_notify(struct net *net, struct sk_buff *oskb,
  42			 struct nlmsghdr *n, struct Qdisc *q,
  43			 unsigned long cl, int event);
  44
  45/*
  46
  47   Short review.
  48   -------------
  49
  50   This file consists of two interrelated parts:
  51
  52   1. queueing disciplines manager frontend.
  53   2. traffic classes manager frontend.
  54
  55   Generally, queueing discipline ("qdisc") is a black box,
  56   which is able to enqueue packets and to dequeue them (when
  57   device is ready to send something) in order and at times
  58   determined by algorithm hidden in it.
  59
  60   qdisc's are divided to two categories:
  61   - "queues", which have no internal structure visible from outside.
  62   - "schedulers", which split all the packets to "traffic classes",
  63     using "packet classifiers" (look at cls_api.c)
  64
  65   In turn, classes may have child qdiscs (as rule, queues)
  66   attached to them etc. etc. etc.
  67
  68   The goal of the routines in this file is to translate
  69   information supplied by user in the form of handles
  70   to more intelligible for kernel form, to make some sanity
  71   checks and part of work, which is common to all qdiscs
  72   and to provide rtnetlink notifications.
  73
  74   All real intelligent work is done inside qdisc modules.
  75
  76
  77
  78   Every discipline has two major routines: enqueue and dequeue.
  79
  80   ---dequeue
  81
  82   dequeue usually returns a skb to send. It is allowed to return NULL,
  83   but it does not mean that queue is empty, it just means that
  84   discipline does not want to send anything this time.
  85   Queue is really empty if q->q.qlen == 0.
  86   For complicated disciplines with multiple queues q->q is not
  87   real packet queue, but however q->q.qlen must be valid.
  88
  89   ---enqueue
  90
  91   enqueue returns 0, if packet was enqueued successfully.
  92   If packet (this one or another one) was dropped, it returns
  93   not zero error code.
  94   NET_XMIT_DROP 	- this packet dropped
  95     Expected action: do not backoff, but wait until queue will clear.
  96   NET_XMIT_CN	 	- probably this packet enqueued, but another one dropped.
  97     Expected action: backoff or ignore
  98   NET_XMIT_POLICED	- dropped by police.
  99     Expected action: backoff or error to real-time apps.
 100
 101   Auxiliary routines:
 102
 103   ---peek
 104
 105   like dequeue but without removing a packet from the queue
 106
 107   ---reset
 108
 109   returns qdisc to initial state: purge all buffers, clear all
 110   timers, counters (except for statistics) etc.
 111
 112   ---init
 113
 114   initializes newly created qdisc.
 115
 116   ---destroy
 117
 118   destroys resources allocated by init and during lifetime of qdisc.
 119
 120   ---change
 121
 122   changes qdisc parameters.
 123 */
 124
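As a hedged illustration of the enqueue/dequeue contract sketched in the review above, here is a minimal "queue"-category discipline. Every name and the 128-packet limit are invented for the example; the helpers (qdisc_enqueue_tail, qdisc_dequeue_head, qdisc_drop) are the stock inline helpers that real single-queue qdiscs such as sch_fifo.c build on.

	/* Illustrative sketch only -- not part of sch_api.c. */
	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		/* Room left: tail-queue the packet and report success. */
		if (likely(skb_queue_len(&sch->q) < 128))
			return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

		/* Queue full: free the skb, count the drop, report NET_XMIT_DROP. */
		return qdisc_drop(skb, sch);
	}

	static struct sk_buff *example_dequeue(struct Qdisc *sch)
	{
		/* A shaping qdisc may legitimately return NULL here while
		 * q.qlen is still non-zero; for this trivial queue, NULL
		 * really does mean the queue is empty. */
		return qdisc_dequeue_head(sch);
	}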
 125/* Protects list of registered TC modules. It is pure SMP lock. */
 126static DEFINE_RWLOCK(qdisc_mod_lock);
 127
 128
 129/************************************************
 130 *	Queueing disciplines manipulation.	*
 131 ************************************************/
 132
 133
 134/* The list of all installed queueing disciplines. */
 135
 136static struct Qdisc_ops *qdisc_base;
 137
  138/* Register/unregister queueing discipline */
 139
 140int register_qdisc(struct Qdisc_ops *qops)
 141{
 142	struct Qdisc_ops *q, **qp;
 143	int rc = -EEXIST;
 144
 145	write_lock(&qdisc_mod_lock);
 146	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 147		if (!strcmp(qops->id, q->id))
 148			goto out;
 149
 150	if (qops->enqueue == NULL)
 151		qops->enqueue = noop_qdisc_ops.enqueue;
 152	if (qops->peek == NULL) {
 153		if (qops->dequeue == NULL)
 154			qops->peek = noop_qdisc_ops.peek;
 155		else
 156			goto out_einval;
 157	}
 158	if (qops->dequeue == NULL)
 159		qops->dequeue = noop_qdisc_ops.dequeue;
 160
 161	if (qops->cl_ops) {
 162		const struct Qdisc_class_ops *cops = qops->cl_ops;
 163
 164		if (!(cops->get && cops->put && cops->walk && cops->leaf))
 165			goto out_einval;
 166
 167		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
 168			goto out_einval;
 169	}
 170
 171	qops->next = NULL;
 172	*qp = qops;
 173	rc = 0;
 174out:
 175	write_unlock(&qdisc_mod_lock);
 176	return rc;
 177
 178out_einval:
 179	rc = -EINVAL;
 180	goto out;
 181}
 182EXPORT_SYMBOL(register_qdisc);
 183
 184int unregister_qdisc(struct Qdisc_ops *qops)
 185{
 186	struct Qdisc_ops *q, **qp;
 187	int err = -ENOENT;
 188
 189	write_lock(&qdisc_mod_lock);
 190	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 191		if (q == qops)
 192			break;
 193	if (q) {
 194		*qp = q->next;
 195		q->next = NULL;
 196		err = 0;
 197	}
 198	write_unlock(&qdisc_mod_lock);
 199	return err;
 200}
 201EXPORT_SYMBOL(unregister_qdisc);
 202
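For context, a hedged sketch of how a scheduler module typically registers its ops with register_qdisc() and tears them down with unregister_qdisc(). The ops name "example" and its callbacks (reusing the example_enqueue/example_dequeue sketch above) are invented; real modules such as sch_fifo.c follow the same shape.

	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
		.id		= "example",
		.priv_size	= 0,
		.enqueue	= example_enqueue,
		.dequeue	= example_dequeue,
		.peek		= qdisc_peek_head,	/* register_qdisc() insists on ->peek
							 * once ->dequeue is provided */
		.owner		= THIS_MODULE,
	};

	static int __init example_module_init(void)
	{
		/* -EEXIST if another "example" is already registered,
		 * -EINVAL if mandatory callbacks are missing. */
		return register_qdisc(&example_qdisc_ops);
	}

	static void __exit example_module_exit(void)
	{
		unregister_qdisc(&example_qdisc_ops);
	}

	module_init(example_module_init);
	module_exit(example_module_exit);
	MODULE_LICENSE("GPL");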
 203/* We know handle. Find qdisc among all qdisc's attached to device
 204   (root qdisc, all its children, children of children etc.)
 
 205 */
 206
 207static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 208{
 209	struct Qdisc *q;
 210
 211	if (!(root->flags & TCQ_F_BUILTIN) &&
 212	    root->handle == handle)
 213		return root;
 214
 215	list_for_each_entry(q, &root->list, list) {
 
 216		if (q->handle == handle)
 217			return q;
 218	}
 219	return NULL;
 220}
 221
 222static void qdisc_list_add(struct Qdisc *q)
 223{
 224	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
 225		list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
 226}
 
 227
 228void qdisc_list_del(struct Qdisc *q)
 229{
 230	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
 231		list_del(&q->list);
 
 
 232}
 233EXPORT_SYMBOL(qdisc_list_del);
 234
 235struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 236{
 237	struct Qdisc *q;
 238
 
 
 239	q = qdisc_match_from_root(dev->qdisc, handle);
 240	if (q)
 241		goto out;
 242
 243	if (dev_ingress_queue(dev))
 244		q = qdisc_match_from_root(
 245			dev_ingress_queue(dev)->qdisc_sleeping,
 246			handle);
 247out:
 248	return q;
 249}
 250
 251static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 252{
 253	unsigned long cl;
 254	struct Qdisc *leaf;
 255	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
 256
 257	if (cops == NULL)
 258		return NULL;
 259	cl = cops->get(p, classid);
 260
 261	if (cl == 0)
 262		return NULL;
 263	leaf = cops->leaf(p, cl);
 264	cops->put(p, cl);
 265	return leaf;
 266}
 267
 268/* Find queueing discipline by name */
 269
 270static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
 271{
 272	struct Qdisc_ops *q = NULL;
 273
 274	if (kind) {
 275		read_lock(&qdisc_mod_lock);
 276		for (q = qdisc_base; q; q = q->next) {
 277			if (nla_strcmp(kind, q->id) == 0) {
 278				if (!try_module_get(q->owner))
 279					q = NULL;
 280				break;
 281			}
 282		}
 283		read_unlock(&qdisc_mod_lock);
 284	}
 285	return q;
 286}
 287
 288static struct qdisc_rate_table *qdisc_rtab_list;
 289
 290struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
 291{
 292	struct qdisc_rate_table *rtab;
 293
 294	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 295		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
 
 296			rtab->refcnt++;
 297			return rtab;
 298		}
 299	}
 300
 301	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
 302	    nla_len(tab) != TC_RTAB_SIZE)
 303		return NULL;
 304
 305	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
 306	if (rtab) {
 307		rtab->rate = *r;
 308		rtab->refcnt = 1;
 309		memcpy(rtab->data, nla_data(tab), 1024);
 
 
 310		rtab->next = qdisc_rtab_list;
 311		qdisc_rtab_list = rtab;
 
 
 312	}
 313	return rtab;
 314}
 315EXPORT_SYMBOL(qdisc_get_rtab);
 316
 317void qdisc_put_rtab(struct qdisc_rate_table *tab)
 318{
 319	struct qdisc_rate_table *rtab, **rtabp;
 320
 321	if (!tab || --tab->refcnt)
 322		return;
 323
 324	for (rtabp = &qdisc_rtab_list;
 325	     (rtab = *rtabp) != NULL;
 326	     rtabp = &rtab->next) {
 327		if (rtab == tab) {
 328			*rtabp = rtab->next;
 329			kfree(rtab);
 330			return;
 331		}
 332	}
 333}
 334EXPORT_SYMBOL(qdisc_put_rtab);
 335
 336static LIST_HEAD(qdisc_stab_list);
 337static DEFINE_SPINLOCK(qdisc_stab_lock);
 338
 339static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 340	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
 341	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 342};
 343
 344static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 
 345{
 346	struct nlattr *tb[TCA_STAB_MAX + 1];
 347	struct qdisc_size_table *stab;
 348	struct tc_sizespec *s;
 349	unsigned int tsize = 0;
 350	u16 *tab = NULL;
 351	int err;
 352
 353	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
 
 354	if (err < 0)
 355		return ERR_PTR(err);
 356	if (!tb[TCA_STAB_BASE])
 
 357		return ERR_PTR(-EINVAL);
 
 358
 359	s = nla_data(tb[TCA_STAB_BASE]);
 360
 361	if (s->tsize > 0) {
 362		if (!tb[TCA_STAB_DATA])
 
 363			return ERR_PTR(-EINVAL);
 
 364		tab = nla_data(tb[TCA_STAB_DATA]);
 365		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 366	}
 367
 368	if (tsize != s->tsize || (!tab && tsize > 0))
 
 369		return ERR_PTR(-EINVAL);
 370
 371	spin_lock(&qdisc_stab_lock);
 372
 373	list_for_each_entry(stab, &qdisc_stab_list, list) {
 374		if (memcmp(&stab->szopts, s, sizeof(*s)))
 375			continue;
 376		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 377			continue;
 378		stab->refcnt++;
 379		spin_unlock(&qdisc_stab_lock);
 380		return stab;
 381	}
 382
 383	spin_unlock(&qdisc_stab_lock);
 384
 385	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 386	if (!stab)
 387		return ERR_PTR(-ENOMEM);
 388
 389	stab->refcnt = 1;
 390	stab->szopts = *s;
 391	if (tsize > 0)
 392		memcpy(stab->data, tab, tsize * sizeof(u16));
 393
 394	spin_lock(&qdisc_stab_lock);
 395	list_add_tail(&stab->list, &qdisc_stab_list);
 396	spin_unlock(&qdisc_stab_lock);
 397
 398	return stab;
 399}
 400
 401static void stab_kfree_rcu(struct rcu_head *head)
 402{
 403	kfree(container_of(head, struct qdisc_size_table, rcu));
 404}
 405
 406void qdisc_put_stab(struct qdisc_size_table *tab)
 407{
 408	if (!tab)
 409		return;
 410
 411	spin_lock(&qdisc_stab_lock);
 412
 413	if (--tab->refcnt == 0) {
 414		list_del(&tab->list);
 415		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
 416	}
 417
 418	spin_unlock(&qdisc_stab_lock);
 419}
 420EXPORT_SYMBOL(qdisc_put_stab);
 421
 422static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
 423{
 424	struct nlattr *nest;
 425
 426	nest = nla_nest_start(skb, TCA_STAB);
 427	if (nest == NULL)
 428		goto nla_put_failure;
 429	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
 
 430	nla_nest_end(skb, nest);
 431
 432	return skb->len;
 433
 434nla_put_failure:
 435	return -1;
 436}
 437
 438void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
 
 439{
 440	int pkt_len, slot;
 441
 442	pkt_len = skb->len + stab->szopts.overhead;
 443	if (unlikely(!stab->szopts.tsize))
 444		goto out;
 445
 446	slot = pkt_len + stab->szopts.cell_align;
 447	if (unlikely(slot < 0))
 448		slot = 0;
 449
 450	slot >>= stab->szopts.cell_log;
 451	if (likely(slot < stab->szopts.tsize))
 452		pkt_len = stab->data[slot];
 453	else
 454		pkt_len = stab->data[stab->szopts.tsize - 1] *
 455				(slot / stab->szopts.tsize) +
 456				stab->data[slot % stab->szopts.tsize];
 457
 458	pkt_len <<= stab->szopts.size_log;
 459out:
 460	if (unlikely(pkt_len < 1))
 461		pkt_len = 1;
 462	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 463}
 464EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
 465
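As a worked illustration with invented parameters: for a size table with overhead = 0, cell_align = 0, cell_log = 6 (64-byte cells) and size_log = 0, a 1000-byte skb gives pkt_len = 1000 and slot = 1000 >> 6 = 15, so the length charged to the scheduler becomes stab->data[15]; only when slot reaches tsize does the code above extrapolate from the last table entry.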
 466void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
 467{
 468	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
 469		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
 470			txt, qdisc->ops->id, qdisc->handle >> 16);
 471		qdisc->flags |= TCQ_F_WARN_NONWC;
 472	}
 473}
 474EXPORT_SYMBOL(qdisc_warn_nonwc);
 475
 476static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 477{
 478	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 479						 timer);
 480
 481	qdisc_unthrottled(wd->qdisc);
 482	__netif_schedule(qdisc_root(wd->qdisc));
 
 483
 484	return HRTIMER_NORESTART;
 485}
 486
 487void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 
 488{
 489	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 490	wd->timer.function = qdisc_watchdog;
 491	wd->qdisc = qdisc;
 492}
 493EXPORT_SYMBOL(qdisc_watchdog_init);
 494
 495void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 496{
 497	ktime_t time;
 
 
 498
 499	if (test_bit(__QDISC_STATE_DEACTIVATED,
 500		     &qdisc_root_sleeping(wd->qdisc)->state))
 501		return;
 502
 503	qdisc_throttled(wd->qdisc);
 504	time = ktime_set(0, 0);
 505	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
 506	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
 507}
 508EXPORT_SYMBOL(qdisc_watchdog_schedule);
 509
 510void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 511{
 512	hrtimer_cancel(&wd->timer);
 513	qdisc_unthrottled(wd->qdisc);
 514}
 515EXPORT_SYMBOL(qdisc_watchdog_cancel);
 516
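A hedged sketch of how a non-work-conserving qdisc typically drives the watchdog above. The struct and field names are invented; the real users are shapers such as sch_tbf.c, and the sketch also illustrates the "NULL from dequeue does not mean empty" rule from the file header.

	struct example_shaper {
		struct qdisc_watchdog	watchdog;	/* armed below, set up with qdisc_watchdog_init() */
		psched_time_t		next_send_time;
	};

	static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
	{
		struct example_shaper *q = qdisc_priv(sch);

		if (q->next_send_time > psched_get_time()) {
			/* Not allowed to send yet: arm the timer and return NULL
			 * even though q.qlen may be non-zero (throttled, not empty). */
			qdisc_watchdog_schedule(&q->watchdog, q->next_send_time);
			return NULL;
		}
		return qdisc_dequeue_head(sch);
	}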
 517static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 518{
 519	unsigned int size = n * sizeof(struct hlist_head), i;
 520	struct hlist_head *h;
 
 521
 522	if (size <= PAGE_SIZE)
 523		h = kmalloc(size, GFP_KERNEL);
 524	else
 525		h = (struct hlist_head *)
 526			__get_free_pages(GFP_KERNEL, get_order(size));
 527
 528	if (h != NULL) {
 529		for (i = 0; i < n; i++)
 530			INIT_HLIST_HEAD(&h[i]);
 531	}
 532	return h;
 533}
 534
 535static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
 536{
 537	unsigned int size = n * sizeof(struct hlist_head);
 538
 539	if (size <= PAGE_SIZE)
 540		kfree(h);
 541	else
 542		free_pages((unsigned long)h, get_order(size));
 543}
 544
 545void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 546{
 547	struct Qdisc_class_common *cl;
 548	struct hlist_node *n, *next;
 549	struct hlist_head *nhash, *ohash;
 550	unsigned int nsize, nmask, osize;
 551	unsigned int i, h;
 552
 553	/* Rehash when load factor exceeds 0.75 */
 554	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
 555		return;
 556	nsize = clhash->hashsize * 2;
 557	nmask = nsize - 1;
 558	nhash = qdisc_class_hash_alloc(nsize);
 559	if (nhash == NULL)
 560		return;
 561
 562	ohash = clhash->hash;
 563	osize = clhash->hashsize;
 564
 565	sch_tree_lock(sch);
 566	for (i = 0; i < osize; i++) {
 567		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
 568			h = qdisc_class_hash(cl->classid, nmask);
 569			hlist_add_head(&cl->hnode, &nhash[h]);
 570		}
 571	}
 572	clhash->hash     = nhash;
 573	clhash->hashsize = nsize;
 574	clhash->hashmask = nmask;
 575	sch_tree_unlock(sch);
 576
 577	qdisc_class_hash_free(ohash, osize);
 578}
 579EXPORT_SYMBOL(qdisc_class_hash_grow);
 580
 581int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 582{
 583	unsigned int size = 4;
 584
 585	clhash->hash = qdisc_class_hash_alloc(size);
 586	if (clhash->hash == NULL)
 587		return -ENOMEM;
 588	clhash->hashsize  = size;
 589	clhash->hashmask  = size - 1;
 590	clhash->hashelems = 0;
 591	return 0;
 592}
 593EXPORT_SYMBOL(qdisc_class_hash_init);
 594
 595void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
 596{
 597	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
 598}
 599EXPORT_SYMBOL(qdisc_class_hash_destroy);
 600
 601void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
 602			     struct Qdisc_class_common *cl)
 603{
 604	unsigned int h;
 605
 606	INIT_HLIST_NODE(&cl->hnode);
 607	h = qdisc_class_hash(cl->classid, clhash->hashmask);
 608	hlist_add_head(&cl->hnode, &clhash->hash[h]);
 609	clhash->hashelems++;
 610}
 611EXPORT_SYMBOL(qdisc_class_hash_insert);
 612
 613void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
 614			     struct Qdisc_class_common *cl)
 615{
 616	hlist_del(&cl->hnode);
 617	clhash->hashelems--;
 618}
 619EXPORT_SYMBOL(qdisc_class_hash_remove);
 620
  621/* Allocate a unique handle from space managed by kernel */
 622
 
 623static u32 qdisc_alloc_handle(struct net_device *dev)
 624{
 625	int i = 0x10000;
 626	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 627
 628	do {
 629		autohandle += TC_H_MAKE(0x10000U, 0);
 630		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
 631			autohandle = TC_H_MAKE(0x80000000U, 0);
 632	} while	(qdisc_lookup(dev, autohandle) && --i > 0);
 633
 634	return i > 0 ? autohandle : 0;
 635}
 636
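For reference, a small hedged illustration of the major:minor handle packing this allocator deals in; TC_H_MAKE/TC_H_MAJ/TC_H_MIN come from linux/pkt_sched.h and the concrete values are examples only.

	static void example_handle_layout(void)
	{
		u32 qdisc_handle = TC_H_MAKE(0x10000U, 0);	/* "1:0" -> 0x00010000 */
		u32 class_id = TC_H_MAKE(qdisc_handle, 5);	/* "1:5" -> 0x00010005 */

		WARN_ON(TC_H_MAJ(class_id) != 0x00010000);	/* major half */
		WARN_ON(TC_H_MIN(class_id) != 0x5);		/* minor half */
		/* qdisc_alloc_handle() above hands out automatic handles starting
		 * at 8001:0 and advances the major part by one per allocation. */
	}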
 637void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 638{
 
 639	const struct Qdisc_class_ops *cops;
 640	unsigned long cl;
 641	u32 parentid;
 
 
 642
 643	if (n == 0)
 644		return;
 
 
 645	while ((parentid = sch->parent)) {
 646		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 647			return;
 648
 649		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 650		if (sch == NULL) {
 651			WARN_ON(parentid != TC_H_ROOT);
 652			return;
 653		}
 654		cops = sch->ops->cl_ops;
 655		if (cops->qlen_notify) {
 656			cl = cops->get(sch, parentid);
 657			cops->qlen_notify(sch, cl);
 658			cops->put(sch, cl);
 659		}
 660		sch->q.qlen -= n;
 
 
 661	}
 662}
 663EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 664
 665static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 666			       struct nlmsghdr *n, u32 clid,
 667			       struct Qdisc *old, struct Qdisc *new)
 668{
 669	if (new || old)
 670		qdisc_notify(net, skb, n, clid, old, new);
 671
 672	if (old)
 673		qdisc_destroy(old);
 674}
 675
 676/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 677 * to device "dev".
 678 *
 679 * When appropriate send a netlink notification using 'skb'
 680 * and "n".
 681 *
 682 * On success, destroy old qdisc.
 683 */
 684
 685static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 686		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
 687		       struct Qdisc *new, struct Qdisc *old)
 
 688{
 689	struct Qdisc *q = old;
 690	struct net *net = dev_net(dev);
 691	int err = 0;
 692
 693	if (parent == NULL) {
 694		unsigned int i, num_q, ingress;
 695
 696		ingress = 0;
 697		num_q = dev->num_tx_queues;
 698		if ((q && q->flags & TCQ_F_INGRESS) ||
 699		    (new && new->flags & TCQ_F_INGRESS)) {
 700			num_q = 1;
 701			ingress = 1;
 702			if (!dev_ingress_queue(dev))
 
 703				return -ENOENT;
 
 704		}
 705
 706		if (dev->flags & IFF_UP)
 707			dev_deactivate(dev);
 708
 709		if (new && new->ops->attach) {
 710			new->ops->attach(new);
 711			num_q = 0;
 712		}
 713
 714		for (i = 0; i < num_q; i++) {
 715			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
 716
 717			if (!ingress)
 718				dev_queue = netdev_get_tx_queue(dev, i);
 719
 720			old = dev_graft_qdisc(dev_queue, new);
 721			if (new && i > 0)
 722				atomic_inc(&new->refcnt);
 723
 724			if (!ingress)
 725				qdisc_destroy(old);
 726		}
 727
 
 728		if (!ingress) {
 729			notify_and_destroy(net, skb, n, classid,
 730					   dev->qdisc, new);
 731			if (new && !new->ops->attach)
 732				atomic_inc(&new->refcnt);
 733			dev->qdisc = new ? : &noop_qdisc;
 734		} else {
 735			notify_and_destroy(net, skb, n, classid, old, new);
 736		}
 737
 738		if (dev->flags & IFF_UP)
 739			dev_activate(dev);
 740	} else {
 741		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 
 
 742
 743		err = -EOPNOTSUPP;
 744		if (cops && cops->graft) {
 745			unsigned long cl = cops->get(parent, classid);
 746			if (cl) {
 747				err = cops->graft(parent, cl, new, &old);
 748				cops->put(parent, cl);
 749			} else
 750				err = -ENOENT;
 751		}
 752		if (!err)
 753			notify_and_destroy(net, skb, n, classid, old, new);
 754	}
 755	return err;
 756}
 757
 758/* lockdep annotation is needed for ingress; egress gets it only for name */
 759static struct lock_class_key qdisc_tx_lock;
 760static struct lock_class_key qdisc_rx_lock;
 761
 762/*
 763   Allocate and initialize new qdisc.
 764
 765   Parameters are passed via opt.
 766 */
 767
 768static struct Qdisc *
 769qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 770	     struct Qdisc *p, u32 parent, u32 handle,
 771	     struct nlattr **tca, int *errp)
 
 772{
 773	int err;
 774	struct nlattr *kind = tca[TCA_KIND];
 775	struct Qdisc *sch;
 776	struct Qdisc_ops *ops;
 777	struct qdisc_size_table *stab;
 778
 779	ops = qdisc_lookup_ops(kind);
 780#ifdef CONFIG_MODULES
 781	if (ops == NULL && kind != NULL) {
 782		char name[IFNAMSIZ];
 783		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
 784			/* We dropped the RTNL semaphore in order to
 785			 * perform the module load.  So, even if we
 786			 * succeeded in loading the module we have to
 787			 * tell the caller to replay the request.  We
 788			 * indicate this using -EAGAIN.
 789			 * We replay the request because the device may
 790			 * go away in the mean time.
 791			 */
 792			rtnl_unlock();
 793			request_module("sch_%s", name);
 794			rtnl_lock();
 795			ops = qdisc_lookup_ops(kind);
 796			if (ops != NULL) {
 797				/* We will try again qdisc_lookup_ops,
 798				 * so don't keep a reference.
 799				 */
 800				module_put(ops->owner);
 801				err = -EAGAIN;
 802				goto err_out;
 803			}
 804		}
 805	}
 806#endif
 807
 808	err = -ENOENT;
 809	if (ops == NULL)
 
 810		goto err_out;
 
 811
 812	sch = qdisc_alloc(dev_queue, ops);
 813	if (IS_ERR(sch)) {
 814		err = PTR_ERR(sch);
 815		goto err_out2;
 816	}
 817
 818	sch->parent = parent;
 819
 820	if (handle == TC_H_INGRESS) {
 821		sch->flags |= TCQ_F_INGRESS;
 822		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 823		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
 824	} else {
 825		if (handle == 0) {
 826			handle = qdisc_alloc_handle(dev);
 827			err = -ENOMEM;
 828			if (handle == 0)
 
 829				goto err_out3;
 
 830		}
 831		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
 
 832	}
 833
 834	sch->handle = handle;
 835
 836	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
 837		if (tca[TCA_STAB]) {
 838			stab = qdisc_get_stab(tca[TCA_STAB]);
 839			if (IS_ERR(stab)) {
 840				err = PTR_ERR(stab);
 841				goto err_out4;
 842			}
 843			rcu_assign_pointer(sch->stab, stab);
 844		}
 845		if (tca[TCA_RATE]) {
 846			spinlock_t *root_lock;
 847
 848			err = -EOPNOTSUPP;
 849			if (sch->flags & TCQ_F_MQROOT)
 850				goto err_out4;
 851
 852			if ((sch->parent != TC_H_ROOT) &&
 853			    !(sch->flags & TCQ_F_INGRESS) &&
 854			    (!p || !(p->flags & TCQ_F_MQROOT)))
 855				root_lock = qdisc_root_sleeping_lock(sch);
 856			else
 857				root_lock = qdisc_lock(sch);
 858
 859			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
 860						root_lock, tca[TCA_RATE]);
 861			if (err)
 862				goto err_out4;
 
 863		}
 864
 865		qdisc_list_add(sch);
 866
 867		return sch;
 868	}
 869err_out3:
 870	dev_put(dev);
 871	kfree((char *) sch - sch->padded);
 872err_out2:
 873	module_put(ops->owner);
 874err_out:
 875	*errp = err;
 876	return NULL;
 877
 878err_out4:
 879	/*
 880	 * Any broken qdiscs that would require a ops->reset() here?
 881	 * The qdisc was never in action so it shouldn't be necessary.
 882	 */
 883	qdisc_put_stab(rtnl_dereference(sch->stab));
 884	if (ops->destroy)
 885		ops->destroy(sch);
 886	goto err_out3;
 887}
 888
 889static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 
 890{
 891	struct qdisc_size_table *ostab, *stab = NULL;
 892	int err = 0;
 893
 894	if (tca[TCA_OPTIONS]) {
 895		if (sch->ops->change == NULL)
 
 896			return -EINVAL;
 897		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
 898		if (err)
 899			return err;
 900	}
 901
 902	if (tca[TCA_STAB]) {
 903		stab = qdisc_get_stab(tca[TCA_STAB]);
 904		if (IS_ERR(stab))
 905			return PTR_ERR(stab);
 906	}
 907
 908	ostab = rtnl_dereference(sch->stab);
 909	rcu_assign_pointer(sch->stab, stab);
 910	qdisc_put_stab(ostab);
 911
 912	if (tca[TCA_RATE]) {
 913		/* NB: ignores errors from replace_estimator
 914		   because change can't be undone. */
 915		if (sch->flags & TCQ_F_MQROOT)
 916			goto out;
 917		gen_replace_estimator(&sch->bstats, &sch->rate_est,
 918					    qdisc_root_sleeping_lock(sch),
 919					    tca[TCA_RATE]);
 920	}
 921out:
 922	return 0;
 923}
 924
 925struct check_loop_arg {
 926	struct qdisc_walker	w;
 927	struct Qdisc		*p;
 928	int			depth;
 929};
 930
 931static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
 
 932
 933static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
 934{
 935	struct check_loop_arg	arg;
 936
 937	if (q->ops->cl_ops == NULL)
 938		return 0;
 939
 940	arg.w.stop = arg.w.skip = arg.w.count = 0;
 941	arg.w.fn = check_loop_fn;
 942	arg.depth = depth;
 943	arg.p = p;
 944	q->ops->cl_ops->walk(q, &arg.w);
 945	return arg.w.stop ? -ELOOP : 0;
 946}
 947
 948static int
 949check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
 950{
 951	struct Qdisc *leaf;
 952	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
 953	struct check_loop_arg *arg = (struct check_loop_arg *)w;
 954
 955	leaf = cops->leaf(q, cl);
 956	if (leaf) {
 957		if (leaf == arg->p || arg->depth > 7)
 958			return -ELOOP;
 959		return check_loop(leaf, arg->p, arg->depth + 1);
 960	}
 961	return 0;
 962}
 963
 964/*
 965 * Delete/get qdisc.
 966 */
 967
 968static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
 969{
 970	struct net *net = sock_net(skb->sk);
 971	struct tcmsg *tcm = NLMSG_DATA(n);
 972	struct nlattr *tca[TCA_MAX + 1];
 973	struct net_device *dev;
 974	u32 clid = tcm->tcm_parent;
 975	struct Qdisc *q = NULL;
 976	struct Qdisc *p = NULL;
 977	int err;
 978
 979	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
 980	if (!dev)
 981		return -ENODEV;
 982
 983	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
 
 984	if (err < 0)
 985		return err;
 986
 987	if (clid) {
 988		if (clid != TC_H_ROOT) {
 989			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
 990				p = qdisc_lookup(dev, TC_H_MAJ(clid));
 991				if (!p)
 
 992					return -ENOENT;
 
 993				q = qdisc_leaf(p, clid);
 994			} else if (dev_ingress_queue(dev)) {
 995				q = dev_ingress_queue(dev)->qdisc_sleeping;
 996			}
 997		} else {
 998			q = dev->qdisc;
 999		}
1000		if (!q)
 
1001			return -ENOENT;
 
1002
1003		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
 
1004			return -EINVAL;
 
1005	} else {
1006		q = qdisc_lookup(dev, tcm->tcm_handle);
1007		if (!q)
 
1008			return -ENOENT;
 
1009	}
1010
1011	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
 
1012		return -EINVAL;
 
1013
1014	if (n->nlmsg_type == RTM_DELQDISC) {
1015		if (!clid)
 
1016			return -EINVAL;
1017		if (q->handle == 0)
 
 
1018			return -ENOENT;
1019		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
 
1020		if (err != 0)
1021			return err;
1022	} else {
1023		qdisc_notify(net, skb, n, clid, NULL, q);
1024	}
1025	return 0;
1026}
1027
1028/*
1029 * Create/change qdisc.
1030 */
1031
1032static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
1033{
1034	struct net *net = sock_net(skb->sk);
1035	struct tcmsg *tcm;
1036	struct nlattr *tca[TCA_MAX + 1];
1037	struct net_device *dev;
1038	u32 clid;
1039	struct Qdisc *q, *p;
1040	int err;
1041
1042replay:
1043	/* Reinit, just in case something touches this. */
1044	tcm = NLMSG_DATA(n);
1045	clid = tcm->tcm_parent;
1046	q = p = NULL;
1047
1048	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1049	if (!dev)
1050		return -ENODEV;
1051
1052	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1053	if (err < 0)
1054		return err;
1055
1056	if (clid) {
1057		if (clid != TC_H_ROOT) {
1058			if (clid != TC_H_INGRESS) {
1059				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1060				if (!p)
 
1061					return -ENOENT;
 
1062				q = qdisc_leaf(p, clid);
1063			} else if (dev_ingress_queue_create(dev)) {
1064				q = dev_ingress_queue(dev)->qdisc_sleeping;
1065			}
1066		} else {
1067			q = dev->qdisc;
1068		}
1069
1070		/* It may be default qdisc, ignore it */
1071		if (q && q->handle == 0)
1072			q = NULL;
1073
1074		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1075			if (tcm->tcm_handle) {
1076				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
 
1077					return -EEXIST;
1078				if (TC_H_MIN(tcm->tcm_handle))
 
 
1079					return -EINVAL;
 
1080				q = qdisc_lookup(dev, tcm->tcm_handle);
1081				if (!q)
1082					goto create_n_graft;
1083				if (n->nlmsg_flags & NLM_F_EXCL)
 
1084					return -EEXIST;
1085				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1086					return -EINVAL;
 
1087				if (q == p ||
1088				    (p && check_loop(q, p, 0)))
 
1089					return -ELOOP;
1090				atomic_inc(&q->refcnt);
 
1091				goto graft;
1092			} else {
1093				if (!q)
1094					goto create_n_graft;
1095
1096				/* This magic test requires explanation.
1097				 *
1098				 *   We know, that some child q is already
1099				 *   attached to this parent and have choice:
1100				 *   either to change it or to create/graft new one.
1101				 *
1102				 *   1. We are allowed to create/graft only
1103				 *   if CREATE and REPLACE flags are set.
1104				 *
1105				 *   2. If EXCL is set, requestor wanted to say,
1106				 *   that qdisc tcm_handle is not expected
1107				 *   to exist, so that we choose create/graft too.
1108				 *
1109				 *   3. The last case is when no flags are set.
1110				 *   Alas, it is sort of hole in API, we
1111				 *   cannot decide what to do unambiguously.
1112				 *   For now we select create/graft, if
1113				 *   user gave KIND, which does not match existing.
1114				 */
1115				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1116				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1117				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1118				     (tca[TCA_KIND] &&
1119				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1120					goto create_n_graft;
1121			}
1122		}
1123	} else {
1124		if (!tcm->tcm_handle)
 
1125			return -EINVAL;
 
1126		q = qdisc_lookup(dev, tcm->tcm_handle);
1127	}
1128
1129	/* Change qdisc parameters */
1130	if (q == NULL)
 
1131		return -ENOENT;
1132	if (n->nlmsg_flags & NLM_F_EXCL)
 
 
1133		return -EEXIST;
1134	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
 
 
1135		return -EINVAL;
1136	err = qdisc_change(q, tca);
 
1137	if (err == 0)
1138		qdisc_notify(net, skb, n, clid, NULL, q);
1139	return err;
1140
1141create_n_graft:
1142	if (!(n->nlmsg_flags & NLM_F_CREATE))
 
1143		return -ENOENT;
 
1144	if (clid == TC_H_INGRESS) {
1145		if (dev_ingress_queue(dev))
1146			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1147					 tcm->tcm_parent, tcm->tcm_parent,
1148					 tca, &err);
1149		else
 
1150			err = -ENOENT;
 
1151	} else {
1152		struct netdev_queue *dev_queue;
1153
1154		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1155			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1156		else if (p)
1157			dev_queue = p->dev_queue;
1158		else
1159			dev_queue = netdev_get_tx_queue(dev, 0);
1160
1161		q = qdisc_create(dev, dev_queue, p,
1162				 tcm->tcm_parent, tcm->tcm_handle,
1163				 tca, &err);
1164	}
1165	if (q == NULL) {
1166		if (err == -EAGAIN)
1167			goto replay;
1168		return err;
1169	}
1170
1171graft:
1172	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1173	if (err) {
1174		if (q)
1175			qdisc_destroy(q);
1176		return err;
1177	}
1178
1179	return 0;
1180}
1181
1182static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1183			 u32 pid, u32 seq, u16 flags, int event)
1184{
1185	struct tcmsg *tcm;
1186	struct nlmsghdr  *nlh;
1187	unsigned char *b = skb_tail_pointer(skb);
1188	struct gnet_dump d;
1189	struct qdisc_size_table *stab;
1190
1191	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1192	tcm = NLMSG_DATA(nlh);
1193	tcm->tcm_family = AF_UNSPEC;
1194	tcm->tcm__pad1 = 0;
1195	tcm->tcm__pad2 = 0;
1196	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1197	tcm->tcm_parent = clid;
1198	tcm->tcm_handle = q->handle;
1199	tcm->tcm_info = atomic_read(&q->refcnt);
1200	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
1201	if (q->ops->dump && q->ops->dump(q, skb) < 0)
1202		goto nla_put_failure;
1203	q->qstats.qlen = q->q.qlen;
1204
1205	stab = rtnl_dereference(q->stab);
1206	if (stab && qdisc_dump_stab(skb, stab) < 0)
1207		goto nla_put_failure;
1208
1209	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1210					 qdisc_root_sleeping_lock(q), &d) < 0)
1211		goto nla_put_failure;
1212
1213	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1214		goto nla_put_failure;
1215
1216	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
1217	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1218	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
1219		goto nla_put_failure;
1220
1221	if (gnet_stats_finish_copy(&d) < 0)
1222		goto nla_put_failure;
1223
1224	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1225	return skb->len;
1226
1227nlmsg_failure:
1228nla_put_failure:
1229	nlmsg_trim(skb, b);
1230	return -1;
1231}
1232
1233static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1234{
1235	return (q->flags & TCQ_F_BUILTIN) ? true : false;
1236}
1237
1238static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1239			struct nlmsghdr *n, u32 clid,
1240			struct Qdisc *old, struct Qdisc *new)
1241{
1242	struct sk_buff *skb;
1243	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1244
1245	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1246	if (!skb)
1247		return -ENOBUFS;
1248
1249	if (old && !tc_qdisc_dump_ignore(old)) {
1250		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
1251				  0, RTM_DELQDISC) < 0)
1252			goto err_out;
1253	}
1254	if (new && !tc_qdisc_dump_ignore(new)) {
1255		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
1256				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1257			goto err_out;
1258	}
1259
1260	if (skb->len)
1261		return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
1262				      n->nlmsg_flags & NLM_F_ECHO);
1263
1264err_out:
1265	kfree_skb(skb);
1266	return -EINVAL;
1267}
1268
1269static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1270			      struct netlink_callback *cb,
1271			      int *q_idx_p, int s_q_idx)
 
1272{
1273	int ret = 0, q_idx = *q_idx_p;
1274	struct Qdisc *q;
 
1275
1276	if (!root)
1277		return 0;
1278
1279	q = root;
1280	if (q_idx < s_q_idx) {
1281		q_idx++;
1282	} else {
1283		if (!tc_qdisc_dump_ignore(q) &&
1284		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1285				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
 
1286			goto done;
1287		q_idx++;
1288	}
1289	list_for_each_entry(q, &root->list, list) {
1290		if (q_idx < s_q_idx) {
1291			q_idx++;
1292			continue;
1293		}
1294		if (!tc_qdisc_dump_ignore(q) &&
1295		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1296				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
 
1297			goto done;
1298		q_idx++;
1299	}
1300
1301out:
1302	*q_idx_p = q_idx;
1303	return ret;
1304done:
1305	ret = -1;
1306	goto out;
1307}
1308
1309static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1310{
1311	struct net *net = sock_net(skb->sk);
1312	int idx, q_idx;
1313	int s_idx, s_q_idx;
1314	struct net_device *dev;
1315
1316	s_idx = cb->args[0];
1317	s_q_idx = q_idx = cb->args[1];
1318
1319	rcu_read_lock();
1320	idx = 0;
1321	for_each_netdev_rcu(net, dev) {
1322		struct netdev_queue *dev_queue;
1323
1324		if (idx < s_idx)
1325			goto cont;
1326		if (idx > s_idx)
1327			s_q_idx = 0;
1328		q_idx = 0;
1329
1330		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
 
1331			goto done;
1332
1333		dev_queue = dev_ingress_queue(dev);
1334		if (dev_queue &&
1335		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1336				       &q_idx, s_q_idx) < 0)
 
1337			goto done;
1338
1339cont:
1340		idx++;
1341	}
1342
1343done:
1344	rcu_read_unlock();
1345
1346	cb->args[0] = idx;
1347	cb->args[1] = q_idx;
1348
1349	return skb->len;
1350}
1351
1352
1353
1354/************************************************
1355 *	Traffic classes manipulation.		*
1356 ************************************************/
1357
1358
1359
1360static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1361{
1362	struct net *net = sock_net(skb->sk);
1363	struct tcmsg *tcm = NLMSG_DATA(n);
1364	struct nlattr *tca[TCA_MAX + 1];
1365	struct net_device *dev;
1366	struct Qdisc *q = NULL;
1367	const struct Qdisc_class_ops *cops;
1368	unsigned long cl = 0;
1369	unsigned long new_cl;
1370	u32 pid = tcm->tcm_parent;
1371	u32 clid = tcm->tcm_handle;
1372	u32 qid = TC_H_MAJ(clid);
1373	int err;
1374
1375	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1376	if (!dev)
1377		return -ENODEV;
1378
1379	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
 
1380	if (err < 0)
1381		return err;
1382
1383	/*
1384	   parent == TC_H_UNSPEC - unspecified parent.
1385	   parent == TC_H_ROOT   - class is root, which has no parent.
1386	   parent == X:0	 - parent is root class.
1387	   parent == X:Y	 - parent is a node in hierarchy.
1388	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
1389
1390	   handle == 0:0	 - generate handle from kernel pool.
1391	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
1392	   handle == X:Y	 - clear.
1393	   handle == X:0	 - root class.
1394	 */
1395
1396	/* Step 1. Determine qdisc handle X:0 */
1397
1398	if (pid != TC_H_ROOT) {
1399		u32 qid1 = TC_H_MAJ(pid);
1400
1401		if (qid && qid1) {
1402			/* If both majors are known, they must be identical. */
1403			if (qid != qid1)
1404				return -EINVAL;
1405		} else if (qid1) {
1406			qid = qid1;
1407		} else if (qid == 0)
1408			qid = dev->qdisc->handle;
1409
1410		/* Now qid is genuine qdisc handle consistent
1411		 * both with parent and child.
1412		 *
1413		 * TC_H_MAJ(pid) still may be unspecified, complete it now.
1414		 */
1415		if (pid)
1416			pid = TC_H_MAKE(qid, pid);
1417	} else {
1418		if (qid == 0)
1419			qid = dev->qdisc->handle;
1420	}
1421
1422	/* OK. Locate qdisc */
1423	q = qdisc_lookup(dev, qid);
1424	if (!q)
1425		return -ENOENT;
1426
 1427	/* And check that it supports classes */
1428	cops = q->ops->cl_ops;
1429	if (cops == NULL)
1430		return -EINVAL;
1431
1432	/* Now try to get class */
1433	if (clid == 0) {
1434		if (pid == TC_H_ROOT)
1435			clid = qid;
1436	} else
1437		clid = TC_H_MAKE(qid, clid);
1438
1439	if (clid)
1440		cl = cops->get(q, clid);
1441
1442	if (cl == 0) {
1443		err = -ENOENT;
1444		if (n->nlmsg_type != RTM_NEWTCLASS ||
1445		    !(n->nlmsg_flags & NLM_F_CREATE))
1446			goto out;
1447	} else {
1448		switch (n->nlmsg_type) {
1449		case RTM_NEWTCLASS:
1450			err = -EEXIST;
1451			if (n->nlmsg_flags & NLM_F_EXCL)
1452				goto out;
1453			break;
1454		case RTM_DELTCLASS:
1455			err = -EOPNOTSUPP;
1456			if (cops->delete)
1457				err = cops->delete(q, cl);
1458			if (err == 0)
1459				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1460			goto out;
1461		case RTM_GETTCLASS:
1462			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1463			goto out;
1464		default:
1465			err = -EINVAL;
1466			goto out;
1467		}
1468	}
1469
1470	new_cl = cl;
1471	err = -EOPNOTSUPP;
1472	if (cops->change)
1473		err = cops->change(q, clid, pid, tca, &new_cl);
1474	if (err == 0)
1475		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1476
1477out:
1478	if (cl)
1479		cops->put(q, cl);
1480
1481	return err;
1482}
1483
1484
1485static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1486			  unsigned long cl,
1487			  u32 pid, u32 seq, u16 flags, int event)
1488{
1489	struct tcmsg *tcm;
1490	struct nlmsghdr  *nlh;
1491	unsigned char *b = skb_tail_pointer(skb);
1492	struct gnet_dump d;
1493	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1494
1495	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1496	tcm = NLMSG_DATA(nlh);
1497	tcm->tcm_family = AF_UNSPEC;
1498	tcm->tcm__pad1 = 0;
1499	tcm->tcm__pad2 = 0;
1500	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1501	tcm->tcm_parent = q->handle;
1502	tcm->tcm_handle = q->handle;
1503	tcm->tcm_info = 0;
1504	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
1505	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1506		goto nla_put_failure;
1507
1508	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1509					 qdisc_root_sleeping_lock(q), &d) < 0)
1510		goto nla_put_failure;
1511
1512	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1513		goto nla_put_failure;
1514
1515	if (gnet_stats_finish_copy(&d) < 0)
1516		goto nla_put_failure;
1517
1518	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1519	return skb->len;
1520
1521nlmsg_failure:
1522nla_put_failure:
1523	nlmsg_trim(skb, b);
1524	return -1;
1525}
1526
1527static int tclass_notify(struct net *net, struct sk_buff *oskb,
1528			 struct nlmsghdr *n, struct Qdisc *q,
1529			 unsigned long cl, int event)
1530{
1531	struct sk_buff *skb;
1532	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1533
1534	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1535	if (!skb)
1536		return -ENOBUFS;
1537
1538	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1539		kfree_skb(skb);
1540		return -EINVAL;
1541	}
1542
1543	return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
1544			      n->nlmsg_flags & NLM_F_ECHO);
1545}
1546
1547struct qdisc_dump_args {
1548	struct qdisc_walker	w;
1549	struct sk_buff		*skb;
1550	struct netlink_callback	*cb;
1551};
1552
1553static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
 
1554{
1555	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1556
1557	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1558			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
 
1559}
1560
1561static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1562				struct tcmsg *tcm, struct netlink_callback *cb,
1563				int *t_p, int s_t)
1564{
1565	struct qdisc_dump_args arg;
1566
1567	if (tc_qdisc_dump_ignore(q) ||
1568	    *t_p < s_t || !q->ops->cl_ops ||
1569	    (tcm->tcm_parent &&
1570	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1571		(*t_p)++;
1572		return 0;
1573	}
1574	if (*t_p > s_t)
1575		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1576	arg.w.fn = qdisc_class_dump;
1577	arg.skb = skb;
1578	arg.cb = cb;
1579	arg.w.stop  = 0;
1580	arg.w.skip = cb->args[1];
1581	arg.w.count = 0;
1582	q->ops->cl_ops->walk(q, &arg.w);
1583	cb->args[1] = arg.w.count;
1584	if (arg.w.stop)
1585		return -1;
1586	(*t_p)++;
1587	return 0;
1588}
1589
1590static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1591			       struct tcmsg *tcm, struct netlink_callback *cb,
1592			       int *t_p, int s_t)
1593{
1594	struct Qdisc *q;
 
1595
1596	if (!root)
1597		return 0;
1598
1599	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1600		return -1;
1601
1602	list_for_each_entry(q, &root->list, list) {
1603		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1604			return -1;
1605	}
1606
1607	return 0;
1608}
1609
1610static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1611{
1612	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
1613	struct net *net = sock_net(skb->sk);
1614	struct netdev_queue *dev_queue;
1615	struct net_device *dev;
1616	int t, s_t;
1617
1618	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1619		return 0;
1620	dev = dev_get_by_index(net, tcm->tcm_ifindex);
1621	if (!dev)
1622		return 0;
1623
1624	s_t = cb->args[0];
1625	t = 0;
1626
1627	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1628		goto done;
1629
1630	dev_queue = dev_ingress_queue(dev);
1631	if (dev_queue &&
1632	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1633				&t, s_t) < 0)
1634		goto done;
1635
1636done:
1637	cb->args[0] = t;
1638
1639	dev_put(dev);
1640	return skb->len;
1641}
1642
1643/* Main classifier routine: scans classifier chain attached
1644 * to this qdisc, (optionally) tests for protocol and asks
1645 * specific classifiers.
1646 */
1647int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
1648		       struct tcf_result *res)
1649{
1650	__be16 protocol = skb->protocol;
1651	int err;
1652
1653	for (; tp; tp = tp->next) {
1654		if (tp->protocol != protocol &&
1655		    tp->protocol != htons(ETH_P_ALL))
1656			continue;
1657		err = tp->classify(skb, tp, res);
1658
1659		if (err >= 0) {
1660#ifdef CONFIG_NET_CLS_ACT
1661			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1662				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1663#endif
1664			return err;
1665		}
1666	}
1667	return -1;
1668}
1669EXPORT_SYMBOL(tc_classify_compat);
1670
1671int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1672		struct tcf_result *res)
1673{
1674	int err = 0;
1675#ifdef CONFIG_NET_CLS_ACT
1676	const struct tcf_proto *otp = tp;
1677reclassify:
1678#endif
1679
1680	err = tc_classify_compat(skb, tp, res);
1681#ifdef CONFIG_NET_CLS_ACT
1682	if (err == TC_ACT_RECLASSIFY) {
1683		u32 verd = G_TC_VERD(skb->tc_verd);
1684		tp = otp;
1685
1686		if (verd++ >= MAX_REC_LOOP) {
1687			if (net_ratelimit())
1688				pr_notice("%s: packet reclassify loop"
1689					  " rule prio %u protocol %02x\n",
1690					  tp->q->ops->id,
1691					  tp->prio & 0xffff,
1692					  ntohs(tp->protocol));
1693			return TC_ACT_SHOT;
1694		}
1695		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1696		goto reclassify;
1697	}
1698#endif
1699	return err;
1700}
1701EXPORT_SYMBOL(tc_classify);
1702
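A hedged sketch (the "example_*" names and the private data layout are invented) of how a classful qdisc's enqueue path usually consumes tc_classify(), following the pattern of prio_classify() and htb_classify(): run the filter chain, fall back to a default class when nothing matches, and let actions steal or shoot the packet.

	struct example_class;

	struct example_sched_data {
		struct tcf_proto	*filter_list;
		struct example_class	*default_class;
	};

	static struct example_class *example_classify(struct sk_buff *skb,
						      struct Qdisc *sch, int *qerr)
	{
		struct example_sched_data *q = qdisc_priv(sch);
		struct tcf_result res;
		int result;

		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
		result = tc_classify(skb, q->filter_list, &res);
		if (result < 0)
			return q->default_class;	/* no filter matched */
	#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;			/* caller drops or counts the skb */
		}
	#endif
		return (struct example_class *)res.class;
	}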
1703void tcf_destroy(struct tcf_proto *tp)
1704{
1705	tp->ops->destroy(tp);
1706	module_put(tp->ops->owner);
1707	kfree(tp);
1708}
1709
1710void tcf_destroy_chain(struct tcf_proto **fl)
1711{
1712	struct tcf_proto *tp;
1713
1714	while ((tp = *fl) != NULL) {
1715		*fl = tp->next;
1716		tcf_destroy(tp);
1717	}
1718}
1719EXPORT_SYMBOL(tcf_destroy_chain);
1720
1721#ifdef CONFIG_PROC_FS
1722static int psched_show(struct seq_file *seq, void *v)
1723{
1724	struct timespec ts;
1725
1726	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1727	seq_printf(seq, "%08x %08x %08x %08x\n",
1728		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1729		   1000000,
1730		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1731
1732	return 0;
1733}
1734
1735static int psched_open(struct inode *inode, struct file *file)
1736{
1737	return single_open(file, psched_show, NULL);
1738}
1739
1740static const struct file_operations psched_fops = {
1741	.owner = THIS_MODULE,
1742	.open = psched_open,
1743	.read  = seq_read,
1744	.llseek = seq_lseek,
1745	.release = single_release,
1746};
1747
1748static int __net_init psched_net_init(struct net *net)
1749{
1750	struct proc_dir_entry *e;
1751
1752	e = proc_net_fops_create(net, "psched", 0, &psched_fops);
1753	if (e == NULL)
1754		return -ENOMEM;
1755
1756	return 0;
1757}
1758
1759static void __net_exit psched_net_exit(struct net *net)
1760{
1761	proc_net_remove(net, "psched");
1762}
1763#else
1764static int __net_init psched_net_init(struct net *net)
1765{
1766	return 0;
1767}
1768
1769static void __net_exit psched_net_exit(struct net *net)
1770{
1771}
1772#endif
1773
1774static struct pernet_operations psched_net_ops = {
1775	.init = psched_net_init,
1776	.exit = psched_net_exit,
1777};
1778
1779static int __init pktsched_init(void)
1780{
1781	int err;
1782
1783	err = register_pernet_subsys(&psched_net_ops);
1784	if (err) {
1785		pr_err("pktsched_init: "
1786		       "cannot initialize per netns operations\n");
1787		return err;
1788	}
1789
 
1790	register_qdisc(&pfifo_qdisc_ops);
1791	register_qdisc(&bfifo_qdisc_ops);
1792	register_qdisc(&pfifo_head_drop_qdisc_ops);
1793	register_qdisc(&mq_qdisc_ops);
 
1794
1795	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1796	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1797	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1798	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1799	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1800	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
 
 
1801
1802	return 0;
1803}
1804
1805subsys_initcall(pktsched_init);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_api.c	Packet scheduler API.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *
   7 * Fixes:
   8 *
   9 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
  10 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
  11 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/string.h>
  18#include <linux/errno.h>
  19#include <linux/skbuff.h>
  20#include <linux/init.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/kmod.h>
  24#include <linux/list.h>
  25#include <linux/hrtimer.h>
 
  26#include <linux/slab.h>
  27#include <linux/hashtable.h>
  28
  29#include <net/net_namespace.h>
  30#include <net/sock.h>
  31#include <net/netlink.h>
  32#include <net/pkt_sched.h>
  33#include <net/pkt_cls.h>
  34
  35#include <trace/events/qdisc.h>
  36
  37/*
  38
  39   Short review.
  40   -------------
  41
  42   This file consists of two interrelated parts:
  43
  44   1. queueing disciplines manager frontend.
  45   2. traffic classes manager frontend.
  46
  47   Generally, queueing discipline ("qdisc") is a black box,
  48   which is able to enqueue packets and to dequeue them (when
  49   device is ready to send something) in order and at times
  50   determined by algorithm hidden in it.
  51
  52   qdisc's are divided to two categories:
  53   - "queues", which have no internal structure visible from outside.
  54   - "schedulers", which split all the packets to "traffic classes",
  55     using "packet classifiers" (look at cls_api.c)
  56
  57   In turn, classes may have child qdiscs (as rule, queues)
  58   attached to them etc. etc. etc.
  59
  60   The goal of the routines in this file is to translate
  61   information supplied by user in the form of handles
  62   to more intelligible for kernel form, to make some sanity
  63   checks and part of work, which is common to all qdiscs
  64   and to provide rtnetlink notifications.
  65
  66   All real intelligent work is done inside qdisc modules.
  67
  68
  69
  70   Every discipline has two major routines: enqueue and dequeue.
  71
  72   ---dequeue
  73
  74   dequeue usually returns a skb to send. It is allowed to return NULL,
  75   but it does not mean that queue is empty, it just means that
  76   discipline does not want to send anything this time.
  77   Queue is really empty if q->q.qlen == 0.
  78   For complicated disciplines with multiple queues q->q is not
  79   real packet queue, but however q->q.qlen must be valid.
  80
  81   ---enqueue
  82
  83   enqueue returns 0, if packet was enqueued successfully.
  84   If packet (this one or another one) was dropped, it returns
  85   not zero error code.
  86   NET_XMIT_DROP 	- this packet dropped
  87     Expected action: do not backoff, but wait until queue will clear.
  88   NET_XMIT_CN	 	- probably this packet enqueued, but another one dropped.
  89     Expected action: backoff or ignore
 
 
  90
  91   Auxiliary routines:
  92
  93   ---peek
  94
  95   like dequeue but without removing a packet from the queue
  96
  97   ---reset
  98
  99   returns qdisc to initial state: purge all buffers, clear all
 100   timers, counters (except for statistics) etc.
 101
 102   ---init
 103
 104   initializes newly created qdisc.
 105
 106   ---destroy
 107
 108   destroys resources allocated by init and during lifetime of qdisc.
 109
 110   ---change
 111
 112   changes qdisc parameters.
 113 */
 114
 115/* Protects list of registered TC modules. It is pure SMP lock. */
 116static DEFINE_RWLOCK(qdisc_mod_lock);
 117
 118
 119/************************************************
 120 *	Queueing disciplines manipulation.	*
 121 ************************************************/
 122
 123
 124/* The list of all installed queueing disciplines. */
 125
 126static struct Qdisc_ops *qdisc_base;
 127
 128/* Register/unregister queueing discipline */
 129
 130int register_qdisc(struct Qdisc_ops *qops)
 131{
 132	struct Qdisc_ops *q, **qp;
 133	int rc = -EEXIST;
 134
 135	write_lock(&qdisc_mod_lock);
 136	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 137		if (!strcmp(qops->id, q->id))
 138			goto out;
 139
 140	if (qops->enqueue == NULL)
 141		qops->enqueue = noop_qdisc_ops.enqueue;
 142	if (qops->peek == NULL) {
 143		if (qops->dequeue == NULL)
 144			qops->peek = noop_qdisc_ops.peek;
 145		else
 146			goto out_einval;
 147	}
 148	if (qops->dequeue == NULL)
 149		qops->dequeue = noop_qdisc_ops.dequeue;
 150
 151	if (qops->cl_ops) {
 152		const struct Qdisc_class_ops *cops = qops->cl_ops;
 153
 154		if (!(cops->find && cops->walk && cops->leaf))
 155			goto out_einval;
 156
 157		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
 158			goto out_einval;
 159	}
 160
 161	qops->next = NULL;
 162	*qp = qops;
 163	rc = 0;
 164out:
 165	write_unlock(&qdisc_mod_lock);
 166	return rc;
 167
 168out_einval:
 169	rc = -EINVAL;
 170	goto out;
 171}
 172EXPORT_SYMBOL(register_qdisc);
 173
 174int unregister_qdisc(struct Qdisc_ops *qops)
 175{
 176	struct Qdisc_ops *q, **qp;
 177	int err = -ENOENT;
 178
 179	write_lock(&qdisc_mod_lock);
 180	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 181		if (q == qops)
 182			break;
 183	if (q) {
 184		*qp = q->next;
 185		q->next = NULL;
 186		err = 0;
 187	}
 188	write_unlock(&qdisc_mod_lock);
 189	return err;
 190}
 191EXPORT_SYMBOL(unregister_qdisc);
 192
 193/* Get default qdisc if not otherwise specified */
 194void qdisc_get_default(char *name, size_t len)
 195{
 196	read_lock(&qdisc_mod_lock);
 197	strlcpy(name, default_qdisc_ops->id, len);
 198	read_unlock(&qdisc_mod_lock);
 199}
 200
 201static struct Qdisc_ops *qdisc_lookup_default(const char *name)
 202{
 203	struct Qdisc_ops *q = NULL;
 204
 205	for (q = qdisc_base; q; q = q->next) {
 206		if (!strcmp(name, q->id)) {
 207			if (!try_module_get(q->owner))
 208				q = NULL;
 209			break;
 210		}
 211	}
 212
 213	return q;
 214}
 215
 216/* Set new default qdisc to use */
 217int qdisc_set_default(const char *name)
 218{
 219	const struct Qdisc_ops *ops;
 220
 221	if (!capable(CAP_NET_ADMIN))
 222		return -EPERM;
 223
 224	write_lock(&qdisc_mod_lock);
 225	ops = qdisc_lookup_default(name);
 226	if (!ops) {
 227		/* Not found, drop lock and try to load module */
 228		write_unlock(&qdisc_mod_lock);
 229		request_module("sch_%s", name);
 230		write_lock(&qdisc_mod_lock);
 231
 232		ops = qdisc_lookup_default(name);
 233	}
 234
 235	if (ops) {
 236		/* Set new default */
 237		module_put(default_qdisc_ops->owner);
 238		default_qdisc_ops = ops;
 239	}
 240	write_unlock(&qdisc_mod_lock);
 241
 242	return ops ? 0 : -ENOENT;
 243}
 244
 245#ifdef CONFIG_NET_SCH_DEFAULT
 246/* Set default value from kernel config */
 247static int __init sch_default_qdisc(void)
 248{
 249	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
 250}
 251late_initcall(sch_default_qdisc);
 252#endif
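/* Usage note (added for illustration): besides the boot-time initcall
 * above, qdisc_set_default() is also reachable at runtime; the
 * net.core.default_qdisc sysctl (handled in net/core) is expected to end
 * up here, so e.g. "sysctl -w net.core.default_qdisc=fq_codel" swaps
 * default_qdisc_ops and loads sch_fq_codel on demand via request_module().
 */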
 253
 254/* We know the handle. Find the qdisc among all qdiscs attached to the
 255 * device (the root qdisc, all its children, children of children, etc.)
 256 * Note: the caller either holds the rtnl lock or rcu_read_lock()
 257 */
 258
 259static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 260{
 261	struct Qdisc *q;
 262
 263	if (!qdisc_dev(root))
 264		return (root->handle == handle ? root : NULL);
 265
 266	if (!(root->flags & TCQ_F_BUILTIN) &&
 267	    root->handle == handle)
 268		return root;
 269
 270	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
 271				   lockdep_rtnl_is_held()) {
 272		if (q->handle == handle)
 273			return q;
 274	}
 275	return NULL;
 276}
 277
 278void qdisc_hash_add(struct Qdisc *q, bool invisible)
 279{
 280	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
 281		ASSERT_RTNL();
 282		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
 283		if (invisible)
 284			q->flags |= TCQ_F_INVISIBLE;
 285	}
 286}
 287EXPORT_SYMBOL(qdisc_hash_add);
 288
 289void qdisc_hash_del(struct Qdisc *q)
 290{
 291	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
 292		ASSERT_RTNL();
 293		hash_del_rcu(&q->hash);
 294	}
 295}
 296EXPORT_SYMBOL(qdisc_hash_del);
 297
 298struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 299{
 300	struct Qdisc *q;
 301
 302	if (!handle)
 303		return NULL;
 304	q = qdisc_match_from_root(dev->qdisc, handle);
 305	if (q)
 306		goto out;
 307
 308	if (dev_ingress_queue(dev))
 309		q = qdisc_match_from_root(
 310			dev_ingress_queue(dev)->qdisc_sleeping,
 311			handle);
 312out:
 313	return q;
 314}
 315
 316struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
 317{
 318	struct netdev_queue *nq;
 319	struct Qdisc *q;
 320
 321	if (!handle)
 322		return NULL;
 323	q = qdisc_match_from_root(dev->qdisc, handle);
 324	if (q)
 325		goto out;
 326
 327	nq = dev_ingress_queue_rcu(dev);
 328	if (nq)
 329		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
 330out:
 331	return q;
 332}
 333
 334static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 335{
 336	unsigned long cl;
 337	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
 338
 339	if (cops == NULL)
 340		return NULL;
 341	cl = cops->find(p, classid);
 342
 343	if (cl == 0)
 344		return NULL;
 345	return cops->leaf(p, cl);
 346}
 347
 348/* Find queueing discipline by name */
 349
 350static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
 351{
 352	struct Qdisc_ops *q = NULL;
 353
 354	if (kind) {
 355		read_lock(&qdisc_mod_lock);
 356		for (q = qdisc_base; q; q = q->next) {
 357			if (nla_strcmp(kind, q->id) == 0) {
 358				if (!try_module_get(q->owner))
 359					q = NULL;
 360				break;
 361			}
 362		}
 363		read_unlock(&qdisc_mod_lock);
 364	}
 365	return q;
 366}
 367
 368/* The linklayer setting was not transferred from older iproute2
 369 * versions, and the rate table lookup system has been dropped from
 370 * the kernel. To stay backward compatible with older iproute2 tc
 371 * utilities, we detect the linklayer setting by detecting whether the
 372 * rate table was modified.
 373 *
 374 * For linklayer ATM, the rate table entries are aligned to 48-byte
 375 * cells, so some table entries will contain the same value.  The mpu
 376 * (min packet unit) is also encoded into the old rate table, so
 377 * starting from the mpu we find the low and high table entries that
 378 * map this cell.  If these entries contain the same value, then the
 379 * rate table has been modified for linklayer ATM.
 380 *
 381 * This is done by rounding the mpu up to the nearest 48-byte cell
 382 * boundary, then rounding up to the next cell boundary, taking the
 383 * table entry just below it, and comparing the two.
 384 */
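/* Worked example (added for illustration; values hypothetical): with
 * mpu = 0 and cell_log = 3, low = roundup(0, 48) = 0 and
 * high = roundup(1, 48) = 48, giving cell_low = 0 and
 * cell_high = (48 >> 3) - 1 = 5; the table is treated as ATM-aligned
 * iff rtab[0] == rtab[5] (and the rate is below 100Mbit/s).
 */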
 385static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 386{
 387	int low       = roundup(r->mpu, 48);
 388	int high      = roundup(low+1, 48);
 389	int cell_low  = low >> r->cell_log;
 390	int cell_high = (high >> r->cell_log) - 1;
 391
 392	/* rtab is too inaccurate at rates > 100Mbit/s */
 393	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
 394		pr_debug("TC linklayer: Giving up ATM detection\n");
 395		return TC_LINKLAYER_ETHERNET;
 396	}
 397
 398	if ((cell_high > cell_low) && (cell_high < 256)
 399	    && (rtab[cell_low] == rtab[cell_high])) {
 400		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
 401			 cell_low, cell_high, rtab[cell_high]);
 402		return TC_LINKLAYER_ATM;
 403	}
 404	return TC_LINKLAYER_ETHERNET;
 405}
 406
 407static struct qdisc_rate_table *qdisc_rtab_list;
 408
 409struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 410					struct nlattr *tab,
 411					struct netlink_ext_ack *extack)
 412{
 413	struct qdisc_rate_table *rtab;
 414
 415	if (tab == NULL || r->rate == 0 ||
 416	    r->cell_log == 0 || r->cell_log >= 32 ||
 417	    nla_len(tab) != TC_RTAB_SIZE) {
 418		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
 419		return NULL;
 420	}
 421
 422	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 423		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
 424		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
 425			rtab->refcnt++;
 426			return rtab;
 427		}
 428	}
 429
 430	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
 431	if (rtab) {
 432		rtab->rate = *r;
 433		rtab->refcnt = 1;
 434		memcpy(rtab->data, nla_data(tab), 1024);
 435		if (r->linklayer == TC_LINKLAYER_UNAWARE)
 436			r->linklayer = __detect_linklayer(r, rtab->data);
 437		rtab->next = qdisc_rtab_list;
 438		qdisc_rtab_list = rtab;
 439	} else {
 440		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
 441	}
 442	return rtab;
 443}
 444EXPORT_SYMBOL(qdisc_get_rtab);
 445
 446void qdisc_put_rtab(struct qdisc_rate_table *tab)
 447{
 448	struct qdisc_rate_table *rtab, **rtabp;
 449
 450	if (!tab || --tab->refcnt)
 451		return;
 452
 453	for (rtabp = &qdisc_rtab_list;
 454	     (rtab = *rtabp) != NULL;
 455	     rtabp = &rtab->next) {
 456		if (rtab == tab) {
 457			*rtabp = rtab->next;
 458			kfree(rtab);
 459			return;
 460		}
 461	}
 462}
 463EXPORT_SYMBOL(qdisc_put_rtab);
 464
 465static LIST_HEAD(qdisc_stab_list);
 466
 467static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 468	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
 469	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 470};
 471
 472static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
 473					       struct netlink_ext_ack *extack)
 474{
 475	struct nlattr *tb[TCA_STAB_MAX + 1];
 476	struct qdisc_size_table *stab;
 477	struct tc_sizespec *s;
 478	unsigned int tsize = 0;
 479	u16 *tab = NULL;
 480	int err;
 481
 482	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
 483					  extack);
 484	if (err < 0)
 485		return ERR_PTR(err);
 486	if (!tb[TCA_STAB_BASE]) {
 487		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
 488		return ERR_PTR(-EINVAL);
 489	}
 490
 491	s = nla_data(tb[TCA_STAB_BASE]);
 492
 493	if (s->tsize > 0) {
 494		if (!tb[TCA_STAB_DATA]) {
 495			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
 496			return ERR_PTR(-EINVAL);
 497		}
 498		tab = nla_data(tb[TCA_STAB_DATA]);
 499		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 500	}
 501
 502	if (tsize != s->tsize || (!tab && tsize > 0)) {
 503		NL_SET_ERR_MSG(extack, "Invalid size of size table");
 504		return ERR_PTR(-EINVAL);
 505	}
 506
 507	list_for_each_entry(stab, &qdisc_stab_list, list) {
 508		if (memcmp(&stab->szopts, s, sizeof(*s)))
 509			continue;
 510		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 511			continue;
 512		stab->refcnt++;
 513		return stab;
 514	}
 515
 516	if (s->size_log > STAB_SIZE_LOG_MAX ||
 517	    s->cell_log > STAB_SIZE_LOG_MAX) {
 518		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
 519		return ERR_PTR(-EINVAL);
 520	}
 521
 522	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 523	if (!stab)
 524		return ERR_PTR(-ENOMEM);
 525
 526	stab->refcnt = 1;
 527	stab->szopts = *s;
 528	if (tsize > 0)
 529		memcpy(stab->data, tab, tsize * sizeof(u16));
 530
 531	list_add_tail(&stab->list, &qdisc_stab_list);
 532
 533	return stab;
 534}
 535
 536void qdisc_put_stab(struct qdisc_size_table *tab)
 537{
 538	if (!tab)
 539		return;
 540
 541	if (--tab->refcnt == 0) {
 542		list_del(&tab->list);
 543		kfree_rcu(tab, rcu);
 544	}
 545}
 546EXPORT_SYMBOL(qdisc_put_stab);
 547
 548static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
 549{
 550	struct nlattr *nest;
 551
 552	nest = nla_nest_start_noflag(skb, TCA_STAB);
 553	if (nest == NULL)
 554		goto nla_put_failure;
 555	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
 556		goto nla_put_failure;
 557	nla_nest_end(skb, nest);
 558
 559	return skb->len;
 560
 561nla_put_failure:
 562	return -1;
 563}
 564
 565void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 566			       const struct qdisc_size_table *stab)
 567{
 568	int pkt_len, slot;
 569
 570	pkt_len = skb->len + stab->szopts.overhead;
 571	if (unlikely(!stab->szopts.tsize))
 572		goto out;
 573
 574	slot = pkt_len + stab->szopts.cell_align;
 575	if (unlikely(slot < 0))
 576		slot = 0;
 577
 578	slot >>= stab->szopts.cell_log;
 579	if (likely(slot < stab->szopts.tsize))
 580		pkt_len = stab->data[slot];
 581	else
 582		pkt_len = stab->data[stab->szopts.tsize - 1] *
 583				(slot / stab->szopts.tsize) +
 584				stab->data[slot % stab->szopts.tsize];
 585
 586	pkt_len <<= stab->szopts.size_log;
 587out:
 588	if (unlikely(pkt_len < 1))
 589		pkt_len = 1;
 590	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 591}
 592EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
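/* Worked example (added for illustration; numbers hypothetical): with
 * szopts = { .overhead = 24, .cell_align = 0, .cell_log = 6,
 * .size_log = 6, .tsize = 512 }, a 1500-byte skb gives
 * pkt_len = 1500 + 24 = 1524, slot = 1524 >> 6 = 23, and the length
 * accounted to the qdisc becomes stab->data[23] << 6.
 */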
 593
 594void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
 595{
 596	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
 597		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
 598			txt, qdisc->ops->id, qdisc->handle >> 16);
 599		qdisc->flags |= TCQ_F_WARN_NONWC;
 600	}
 601}
 602EXPORT_SYMBOL(qdisc_warn_nonwc);
 603
 604static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 605{
 606	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 607						 timer);
 608
 609	rcu_read_lock();
 610	__netif_schedule(qdisc_root(wd->qdisc));
 611	rcu_read_unlock();
 612
 613	return HRTIMER_NORESTART;
 614}
 615
 616void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
 617				 clockid_t clockid)
 618{
 619	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
 620	wd->timer.function = qdisc_watchdog;
 621	wd->qdisc = qdisc;
 622}
 623EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
 624
 625void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 626{
 627	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
 628}
 629EXPORT_SYMBOL(qdisc_watchdog_init);
 630
 631void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
 632				      u64 delta_ns)
 633{
 634	if (test_bit(__QDISC_STATE_DEACTIVATED,
 635		     &qdisc_root_sleeping(wd->qdisc)->state))
 636		return;
 637
 638	if (hrtimer_is_queued(&wd->timer)) {
 639		/* If timer is already set in [expires, expires + delta_ns],
 640		 * do not reprogram it.
 641		 */
 642		if (wd->last_expires - expires <= delta_ns)
 643			return;
 644	}
 645
 646	wd->last_expires = expires;
 647	hrtimer_start_range_ns(&wd->timer,
 648			       ns_to_ktime(expires),
 649			       delta_ns,
 650			       HRTIMER_MODE_ABS_PINNED);
 651}
 652EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
 653
 654void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 655{
 656	hrtimer_cancel(&wd->timer);
 657}
 658EXPORT_SYMBOL(qdisc_watchdog_cancel);
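/* Illustrative sketch (added; not part of the original file): the typical
 * way a shaping qdisc uses the watchdog helpers above.  All example_*
 * names are hypothetical; the pattern is: init the watchdog in ->init(),
 * arm it from ->dequeue() when nothing may be sent yet, and cancel it in
 * ->reset()/->destroy().
 */
struct example_shaper_priv {
	struct qdisc_watchdog	watchdog;
	u64			next_tx_ns;	/* earliest allowed transmit time */
};

static int example_shaper_init(struct Qdisc *sch, struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct example_shaper_priv *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	return 0;
}

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_priv *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();

	if (now < q->next_tx_ns) {
		/* Rate limited: returning NULL with q->q.qlen != 0 is fine,
		 * but arm the watchdog so the device gets rescheduled.
		 */
		qdisc_watchdog_schedule_range_ns(&q->watchdog,
						 q->next_tx_ns, 0);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}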
 659
 660static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 661{
 662	struct hlist_head *h;
 663	unsigned int i;
 664
 665	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
 666
 667	if (h != NULL) {
 668		for (i = 0; i < n; i++)
 669			INIT_HLIST_HEAD(&h[i]);
 670	}
 671	return h;
 672}
 673
 674void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 675{
 676	struct Qdisc_class_common *cl;
 677	struct hlist_node *next;
 678	struct hlist_head *nhash, *ohash;
 679	unsigned int nsize, nmask, osize;
 680	unsigned int i, h;
 681
 682	/* Rehash when load factor exceeds 0.75 */
 683	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
 684		return;
 685	nsize = clhash->hashsize * 2;
 686	nmask = nsize - 1;
 687	nhash = qdisc_class_hash_alloc(nsize);
 688	if (nhash == NULL)
 689		return;
 690
 691	ohash = clhash->hash;
 692	osize = clhash->hashsize;
 693
 694	sch_tree_lock(sch);
 695	for (i = 0; i < osize; i++) {
 696		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
 697			h = qdisc_class_hash(cl->classid, nmask);
 698			hlist_add_head(&cl->hnode, &nhash[h]);
 699		}
 700	}
 701	clhash->hash     = nhash;
 702	clhash->hashsize = nsize;
 703	clhash->hashmask = nmask;
 704	sch_tree_unlock(sch);
 705
 706	kvfree(ohash);
 707}
 708EXPORT_SYMBOL(qdisc_class_hash_grow);
 709
 710int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 711{
 712	unsigned int size = 4;
 713
 714	clhash->hash = qdisc_class_hash_alloc(size);
 715	if (!clhash->hash)
 716		return -ENOMEM;
 717	clhash->hashsize  = size;
 718	clhash->hashmask  = size - 1;
 719	clhash->hashelems = 0;
 720	return 0;
 721}
 722EXPORT_SYMBOL(qdisc_class_hash_init);
 723
 724void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
 725{
 726	kvfree(clhash->hash);
 727}
 728EXPORT_SYMBOL(qdisc_class_hash_destroy);
 729
 730void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
 731			     struct Qdisc_class_common *cl)
 732{
 733	unsigned int h;
 734
 735	INIT_HLIST_NODE(&cl->hnode);
 736	h = qdisc_class_hash(cl->classid, clhash->hashmask);
 737	hlist_add_head(&cl->hnode, &clhash->hash[h]);
 738	clhash->hashelems++;
 739}
 740EXPORT_SYMBOL(qdisc_class_hash_insert);
 741
 742void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
 743			     struct Qdisc_class_common *cl)
 744{
 745	hlist_del(&cl->hnode);
 746	clhash->hashelems--;
 747}
 748EXPORT_SYMBOL(qdisc_class_hash_remove);
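/* Illustrative sketch (added; not part of the original file): how a
 * classful qdisc typically uses the class hash helpers above.  The
 * example_* names are hypothetical; each class embeds a
 * struct Qdisc_class_common keyed by classid, and lookups go through
 * qdisc_class_find().
 */
struct example_class {
	struct Qdisc_class_common common;	/* keyed by common.classid */
	/* per-class scheduler state would follow here */
};

static int example_add_class(struct Qdisc *sch,
			     struct Qdisc_class_hash *clhash, u32 classid)
{
	struct example_class *cl = kzalloc(sizeof(*cl), GFP_KERNEL);

	if (!cl)
		return -ENOBUFS;
	cl->common.classid = classid;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(clhash, &cl->common);
	sch_tree_unlock(sch);

	/* May allocate and rehash; done outside the qdisc tree lock. */
	qdisc_class_hash_grow(sch, clhash);
	return 0;
}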
 749
 750/* Allocate a unique handle from the space managed by the kernel.
 751 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 752 */
 753static u32 qdisc_alloc_handle(struct net_device *dev)
 754{
 755	int i = 0x8000;
 756	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 757
 758	do {
 759		autohandle += TC_H_MAKE(0x10000U, 0);
 760		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
 761			autohandle = TC_H_MAKE(0x80000000U, 0);
 762		if (!qdisc_lookup(dev, autohandle))
 763			return autohandle;
 764		cond_resched();
 765	} while	(--i > 0);
 766
 767	return 0;
 768}
 769
 770void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 771{
 772	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
 773	const struct Qdisc_class_ops *cops;
 774	unsigned long cl;
 775	u32 parentid;
 776	bool notify;
 777	int drops;
 778
 779	if (n == 0 && len == 0)
 780		return;
 781	drops = max_t(int, n, 0);
 782	rcu_read_lock();
 783	while ((parentid = sch->parent)) {
 784		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 785			break;
 786
 787		if (sch->flags & TCQ_F_NOPARENT)
 788			break;
 789		/* Notify the parent qdisc only if the child qdisc becomes empty.
 790		 *
 791		 * If the child was empty even before the update, the backlog
 792		 * counter is screwed and we skip the notification because the
 793		 * parent class is already passive.
 794		 *
 795		 * If the original child was offloaded then it is allowed to
 796		 * be seen as empty, so the parent is notified anyway.
 797		 */
 798		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
 799						       !qdisc_is_offloaded);
 800		/* TODO: perform the search on a per txq basis */
 801		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 802		if (sch == NULL) {
 803			WARN_ON_ONCE(parentid != TC_H_ROOT);
 804			break;
 805		}
 806		cops = sch->ops->cl_ops;
 807		if (notify && cops->qlen_notify) {
 808			cl = cops->find(sch, parentid);
 809			cops->qlen_notify(sch, cl);
 810		}
 811		sch->q.qlen -= n;
 812		sch->qstats.backlog -= len;
 813		__qdisc_qstats_drop(sch, drops);
 814	}
 815	rcu_read_unlock();
 816}
 817EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
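/* Usage note (added for illustration): a child qdisc that removes
 * packets outside of a normal dequeue -- say n already-queued packets
 * totalling "bytes" bytes purged when its limit is lowered -- keeps the
 * qlen/backlog counters of its ancestors consistent with:
 *
 *	qdisc_tree_reduce_backlog(sch, n, bytes);
 *
 * Negative deltas are allowed and increase the counters instead; only
 * the drop statistic is clamped via max_t(int, n, 0) above.
 */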
 818
 819int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
 820			      void *type_data)
 821{
 822	struct net_device *dev = qdisc_dev(sch);
 823	int err;
 824
 825	sch->flags &= ~TCQ_F_OFFLOADED;
 826	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 827		return 0;
 828
 829	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
 830	if (err == -EOPNOTSUPP)
 831		return 0;
 832
 833	if (!err)
 834		sch->flags |= TCQ_F_OFFLOADED;
 835
 836	return err;
 837}
 838EXPORT_SYMBOL(qdisc_offload_dump_helper);
 839
 840void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
 841				struct Qdisc *new, struct Qdisc *old,
 842				enum tc_setup_type type, void *type_data,
 843				struct netlink_ext_ack *extack)
 844{
 845	bool any_qdisc_is_offloaded;
 846	int err;
 847
 848	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 849		return;
 850
 851	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
 852
 853	/* Don't report error if the graft is part of destroy operation. */
 854	if (!err || !new || new == &noop_qdisc)
 855		return;
 856
 857	/* Don't report error if the parent, the old child and the new
 858	 * one are not offloaded.
 859	 */
 860	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
 861	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
 862	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
 863
 864	if (any_qdisc_is_offloaded)
 865		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
 866}
 867EXPORT_SYMBOL(qdisc_offload_graft_helper);
 868
 869static void qdisc_offload_graft_root(struct net_device *dev,
 870				     struct Qdisc *new, struct Qdisc *old,
 871				     struct netlink_ext_ack *extack)
 872{
 873	struct tc_root_qopt_offload graft_offload = {
 874		.command	= TC_ROOT_GRAFT,
 875		.handle		= new ? new->handle : 0,
 876		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
 877				  (old && old->flags & TCQ_F_INGRESS),
 878	};
 879
 880	qdisc_offload_graft_helper(dev, NULL, new, old,
 881				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
 882}
 883
 884static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 885			 u32 portid, u32 seq, u16 flags, int event)
 886{
 887	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
 888	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 889	struct tcmsg *tcm;
 890	struct nlmsghdr  *nlh;
 891	unsigned char *b = skb_tail_pointer(skb);
 892	struct gnet_dump d;
 893	struct qdisc_size_table *stab;
 894	u32 block_index;
 895	__u32 qlen;
 896
 897	cond_resched();
 898	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
 899	if (!nlh)
 900		goto out_nlmsg_trim;
 901	tcm = nlmsg_data(nlh);
 902	tcm->tcm_family = AF_UNSPEC;
 903	tcm->tcm__pad1 = 0;
 904	tcm->tcm__pad2 = 0;
 905	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 906	tcm->tcm_parent = clid;
 907	tcm->tcm_handle = q->handle;
 908	tcm->tcm_info = refcount_read(&q->refcnt);
 909	if (nla_put_string(skb, TCA_KIND, q->ops->id))
 910		goto nla_put_failure;
 911	if (q->ops->ingress_block_get) {
 912		block_index = q->ops->ingress_block_get(q);
 913		if (block_index &&
 914		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
 915			goto nla_put_failure;
 916	}
 917	if (q->ops->egress_block_get) {
 918		block_index = q->ops->egress_block_get(q);
 919		if (block_index &&
 920		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
 921			goto nla_put_failure;
 922	}
 923	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 924		goto nla_put_failure;
 925	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
 926		goto nla_put_failure;
 927	qlen = qdisc_qlen_sum(q);
 928
 929	stab = rtnl_dereference(q->stab);
 930	if (stab && qdisc_dump_stab(skb, stab) < 0)
 931		goto nla_put_failure;
 932
 933	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
 934					 NULL, &d, TCA_PAD) < 0)
 935		goto nla_put_failure;
 936
 937	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
 938		goto nla_put_failure;
 939
 940	if (qdisc_is_percpu_stats(q)) {
 941		cpu_bstats = q->cpu_bstats;
 942		cpu_qstats = q->cpu_qstats;
 943	}
 944
 945	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
 946				  &d, cpu_bstats, &q->bstats) < 0 ||
 947	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 948	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
 949		goto nla_put_failure;
 950
 951	if (gnet_stats_finish_copy(&d) < 0)
 952		goto nla_put_failure;
 953
 954	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 955	return skb->len;
 956
 957out_nlmsg_trim:
 958nla_put_failure:
 959	nlmsg_trim(skb, b);
 960	return -1;
 961}
 962
 963static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
 964{
 965	if (q->flags & TCQ_F_BUILTIN)
 966		return true;
 967	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
 968		return true;
 969
 970	return false;
 971}
 972
 973static int qdisc_notify(struct net *net, struct sk_buff *oskb,
 974			struct nlmsghdr *n, u32 clid,
 975			struct Qdisc *old, struct Qdisc *new)
 976{
 977	struct sk_buff *skb;
 978	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 979
 980	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 981	if (!skb)
 982		return -ENOBUFS;
 983
 984	if (old && !tc_qdisc_dump_ignore(old, false)) {
 985		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
 986				  0, RTM_DELQDISC) < 0)
 987			goto err_out;
 988	}
 989	if (new && !tc_qdisc_dump_ignore(new, false)) {
 990		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
 991				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
 992			goto err_out;
 993	}
 994
 995	if (skb->len)
 996		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
 997				      n->nlmsg_flags & NLM_F_ECHO);
 998
 999err_out:
1000	kfree_skb(skb);
1001	return -EINVAL;
1002}
1003
1004static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1005			       struct nlmsghdr *n, u32 clid,
1006			       struct Qdisc *old, struct Qdisc *new)
1007{
1008	if (new || old)
1009		qdisc_notify(net, skb, n, clid, old, new);
1010
1011	if (old)
1012		qdisc_put(old);
1013}
1014
1015static void qdisc_clear_nolock(struct Qdisc *sch)
1016{
1017	sch->flags &= ~TCQ_F_NOLOCK;
1018	if (!(sch->flags & TCQ_F_CPUSTATS))
1019		return;
1020
1021	free_percpu(sch->cpu_bstats);
1022	free_percpu(sch->cpu_qstats);
1023	sch->cpu_bstats = NULL;
1024	sch->cpu_qstats = NULL;
1025	sch->flags &= ~TCQ_F_CPUSTATS;
1026}
1027
1028/* Graft qdisc "new" to class "classid" of qdisc "parent" or
1029 * to device "dev".
1030 *
1031 * When appropriate, send a netlink notification using 'skb'
1032 * and 'n'.
1033 *
1034 * On success, destroy old qdisc.
1035 */
1036
1037static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1038		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1039		       struct Qdisc *new, struct Qdisc *old,
1040		       struct netlink_ext_ack *extack)
1041{
1042	struct Qdisc *q = old;
1043	struct net *net = dev_net(dev);
1044
1045	if (parent == NULL) {
1046		unsigned int i, num_q, ingress;
1047
1048		ingress = 0;
1049		num_q = dev->num_tx_queues;
1050		if ((q && q->flags & TCQ_F_INGRESS) ||
1051		    (new && new->flags & TCQ_F_INGRESS)) {
1052			num_q = 1;
1053			ingress = 1;
1054			if (!dev_ingress_queue(dev)) {
1055				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1056				return -ENOENT;
1057			}
1058		}
1059
1060		if (dev->flags & IFF_UP)
1061			dev_deactivate(dev);
1062
1063		qdisc_offload_graft_root(dev, new, old, extack);
1064
1065		if (new && new->ops->attach)
1066			goto skip;
1067
1068		for (i = 0; i < num_q; i++) {
1069			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
1070
1071			if (!ingress)
1072				dev_queue = netdev_get_tx_queue(dev, i);
1073
1074			old = dev_graft_qdisc(dev_queue, new);
1075			if (new && i > 0)
1076				qdisc_refcount_inc(new);
1077
1078			if (!ingress)
1079				qdisc_put(old);
1080		}
1081
1082skip:
1083		if (!ingress) {
1084			notify_and_destroy(net, skb, n, classid,
1085					   dev->qdisc, new);
1086			if (new && !new->ops->attach)
1087				qdisc_refcount_inc(new);
1088			dev->qdisc = new ? : &noop_qdisc;
1089
1090			if (new && new->ops->attach)
1091				new->ops->attach(new);
1092		} else {
1093			notify_and_destroy(net, skb, n, classid, old, new);
1094		}
1095
1096		if (dev->flags & IFF_UP)
1097			dev_activate(dev);
1098	} else {
1099		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1100		unsigned long cl;
1101		int err;
1102
1103		/* Only support running class lockless if parent is lockless */
1104		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1105			qdisc_clear_nolock(new);
1106
1107		if (!cops || !cops->graft)
1108			return -EOPNOTSUPP;
1109
1110		cl = cops->find(parent, classid);
1111		if (!cl) {
1112			NL_SET_ERR_MSG(extack, "Specified class not found");
1113			return -ENOENT;
1114		}
1115
1116		err = cops->graft(parent, cl, new, &old, extack);
1117		if (err)
1118			return err;
1119		notify_and_destroy(net, skb, n, classid, old, new);
1120	}
1121	return 0;
1122}
1123
1124static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1125				   struct netlink_ext_ack *extack)
1126{
1127	u32 block_index;
1128
1129	if (tca[TCA_INGRESS_BLOCK]) {
1130		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1131
1132		if (!block_index) {
1133			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1134			return -EINVAL;
1135		}
1136		if (!sch->ops->ingress_block_set) {
1137			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1138			return -EOPNOTSUPP;
1139		}
1140		sch->ops->ingress_block_set(sch, block_index);
1141	}
1142	if (tca[TCA_EGRESS_BLOCK]) {
1143		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1144
1145		if (!block_index) {
1146			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1147			return -EINVAL;
1148		}
1149		if (!sch->ops->egress_block_set) {
1150			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1151			return -EOPNOTSUPP;
1152		}
1153		sch->ops->egress_block_set(sch, block_index);
1154	}
1155	return 0;
1156}
1157
1158/*
1159   Allocate and initialize new qdisc.
1160
1161   Parameters are passed via opt.
1162 */
1163
1164static struct Qdisc *qdisc_create(struct net_device *dev,
1165				  struct netdev_queue *dev_queue,
1166				  struct Qdisc *p, u32 parent, u32 handle,
1167				  struct nlattr **tca, int *errp,
1168				  struct netlink_ext_ack *extack)
1169{
1170	int err;
1171	struct nlattr *kind = tca[TCA_KIND];
1172	struct Qdisc *sch;
1173	struct Qdisc_ops *ops;
1174	struct qdisc_size_table *stab;
1175
1176	ops = qdisc_lookup_ops(kind);
1177#ifdef CONFIG_MODULES
1178	if (ops == NULL && kind != NULL) {
1179		char name[IFNAMSIZ];
1180		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1181			/* We dropped the RTNL semaphore in order to
1182			 * perform the module load.  So, even if we
1183			 * succeeded in loading the module we have to
1184			 * tell the caller to replay the request.  We
1185			 * indicate this using -EAGAIN.
1186			 * We replay the request because the device may
1187			 * go away in the mean time.
1188			 */
1189			rtnl_unlock();
1190			request_module("sch_%s", name);
1191			rtnl_lock();
1192			ops = qdisc_lookup_ops(kind);
1193			if (ops != NULL) {
1194				/* We will try again qdisc_lookup_ops,
1195				 * so don't keep a reference.
1196				 */
1197				module_put(ops->owner);
1198				err = -EAGAIN;
1199				goto err_out;
1200			}
1201		}
1202	}
1203#endif
1204
1205	err = -ENOENT;
1206	if (!ops) {
1207		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1208		goto err_out;
1209	}
1210
1211	sch = qdisc_alloc(dev_queue, ops, extack);
1212	if (IS_ERR(sch)) {
1213		err = PTR_ERR(sch);
1214		goto err_out2;
1215	}
1216
1217	sch->parent = parent;
1218
1219	if (handle == TC_H_INGRESS) {
1220		sch->flags |= TCQ_F_INGRESS;
1221		handle = TC_H_MAKE(TC_H_INGRESS, 0);
1222	} else {
1223		if (handle == 0) {
1224			handle = qdisc_alloc_handle(dev);
1225			if (handle == 0) {
1226				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1227				err = -ENOSPC;
1228				goto err_out3;
1229			}
1230		}
1231		if (!netif_is_multiqueue(dev))
1232			sch->flags |= TCQ_F_ONETXQUEUE;
1233	}
1234
1235	sch->handle = handle;
1236
1237	/* This exists to stay backward compatible with a userspace
1238	 * loophole that allowed userspace to get the IFF_NO_QUEUE
1239	 * facility on older kernels by setting tx_queue_len=0 (prior
1240	 * to qdisc init), and then forgetting to reinit tx_queue_len
1241	 * before attaching a qdisc again.
1242	 */
1243	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1244		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1245		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1246	}
1247
1248	err = qdisc_block_indexes_set(sch, tca, extack);
1249	if (err)
1250		goto err_out3;
1251
1252	if (ops->init) {
1253		err = ops->init(sch, tca[TCA_OPTIONS], extack);
1254		if (err != 0)
1255			goto err_out5;
1256	}
1257
1258	if (tca[TCA_STAB]) {
1259		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1260		if (IS_ERR(stab)) {
1261			err = PTR_ERR(stab);
1262			goto err_out4;
1263		}
1264		rcu_assign_pointer(sch->stab, stab);
1265	}
1266	if (tca[TCA_RATE]) {
1267		seqcount_t *running;
1268
1269		err = -EOPNOTSUPP;
1270		if (sch->flags & TCQ_F_MQROOT) {
1271			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1272			goto err_out4;
1273		}
1274
1275		if (sch->parent != TC_H_ROOT &&
1276		    !(sch->flags & TCQ_F_INGRESS) &&
1277		    (!p || !(p->flags & TCQ_F_MQROOT)))
1278			running = qdisc_root_sleeping_running(sch);
1279		else
1280			running = &sch->running;
1281
1282		err = gen_new_estimator(&sch->bstats,
1283					sch->cpu_bstats,
1284					&sch->rate_est,
1285					NULL,
1286					running,
1287					tca[TCA_RATE]);
1288		if (err) {
1289			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1290			goto err_out4;
1291		}
1292	}
1293
1294	qdisc_hash_add(sch, false);
1295	trace_qdisc_create(ops, dev, parent);
1296
1297	return sch;
1298
1299err_out5:
1300	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1301	if (ops->destroy)
1302		ops->destroy(sch);
1303err_out3:
1304	dev_put(dev);
1305	qdisc_free(sch);
1306err_out2:
1307	module_put(ops->owner);
1308err_out:
1309	*errp = err;
1310	return NULL;
1311
1312err_out4:
1313	/*
1314	 * Any broken qdiscs that would require an ops->reset() here?
1315	 * The qdisc was never in action so it shouldn't be necessary.
1316	 */
1317	qdisc_put_stab(rtnl_dereference(sch->stab));
1318	if (ops->destroy)
1319		ops->destroy(sch);
1320	goto err_out3;
1321}
1322
1323static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1324			struct netlink_ext_ack *extack)
1325{
1326	struct qdisc_size_table *ostab, *stab = NULL;
1327	int err = 0;
1328
1329	if (tca[TCA_OPTIONS]) {
1330		if (!sch->ops->change) {
1331			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1332			return -EINVAL;
1333		}
1334		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1335			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1336			return -EOPNOTSUPP;
1337		}
1338		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1339		if (err)
1340			return err;
1341	}
1342
1343	if (tca[TCA_STAB]) {
1344		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1345		if (IS_ERR(stab))
1346			return PTR_ERR(stab);
1347	}
1348
1349	ostab = rtnl_dereference(sch->stab);
1350	rcu_assign_pointer(sch->stab, stab);
1351	qdisc_put_stab(ostab);
1352
1353	if (tca[TCA_RATE]) {
1354		/* NB: ignores errors from replace_estimator
1355		   because change can't be undone. */
1356		if (sch->flags & TCQ_F_MQROOT)
1357			goto out;
1358		gen_replace_estimator(&sch->bstats,
1359				      sch->cpu_bstats,
1360				      &sch->rate_est,
1361				      NULL,
1362				      qdisc_root_sleeping_running(sch),
1363				      tca[TCA_RATE]);
1364	}
1365out:
1366	return 0;
1367}
1368
1369struct check_loop_arg {
1370	struct qdisc_walker	w;
1371	struct Qdisc		*p;
1372	int			depth;
1373};
1374
1375static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1376			 struct qdisc_walker *w);
1377
1378static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1379{
1380	struct check_loop_arg	arg;
1381
1382	if (q->ops->cl_ops == NULL)
1383		return 0;
1384
1385	arg.w.stop = arg.w.skip = arg.w.count = 0;
1386	arg.w.fn = check_loop_fn;
1387	arg.depth = depth;
1388	arg.p = p;
1389	q->ops->cl_ops->walk(q, &arg.w);
1390	return arg.w.stop ? -ELOOP : 0;
1391}
1392
1393static int
1394check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1395{
1396	struct Qdisc *leaf;
1397	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1398	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1399
1400	leaf = cops->leaf(q, cl);
1401	if (leaf) {
1402		if (leaf == arg->p || arg->depth > 7)
1403			return -ELOOP;
1404		return check_loop(leaf, arg->p, arg->depth + 1);
1405	}
1406	return 0;
1407}
1408
1409const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1410	[TCA_KIND]		= { .type = NLA_STRING },
1411	[TCA_RATE]		= { .type = NLA_BINARY,
1412				    .len = sizeof(struct tc_estimator) },
1413	[TCA_STAB]		= { .type = NLA_NESTED },
1414	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
1415	[TCA_CHAIN]		= { .type = NLA_U32 },
1416	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
1417	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
1418};
1419
1420/*
1421 * Delete/get qdisc.
1422 */
1423
1424static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1425			struct netlink_ext_ack *extack)
1426{
1427	struct net *net = sock_net(skb->sk);
1428	struct tcmsg *tcm = nlmsg_data(n);
1429	struct nlattr *tca[TCA_MAX + 1];
1430	struct net_device *dev;
1431	u32 clid;
1432	struct Qdisc *q = NULL;
1433	struct Qdisc *p = NULL;
1434	int err;
1435
1436	if ((n->nlmsg_type != RTM_GETQDISC) &&
1437	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1438		return -EPERM;
1439
1440	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1441				     rtm_tca_policy, extack);
1442	if (err < 0)
1443		return err;
1444
1445	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1446	if (!dev)
1447		return -ENODEV;
1448
1449	clid = tcm->tcm_parent;
1450	if (clid) {
1451		if (clid != TC_H_ROOT) {
1452			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1453				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1454				if (!p) {
1455					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1456					return -ENOENT;
1457				}
1458				q = qdisc_leaf(p, clid);
1459			} else if (dev_ingress_queue(dev)) {
1460				q = dev_ingress_queue(dev)->qdisc_sleeping;
1461			}
1462		} else {
1463			q = dev->qdisc;
1464		}
1465		if (!q) {
1466			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1467			return -ENOENT;
1468		}
1469
1470		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1471			NL_SET_ERR_MSG(extack, "Invalid handle");
1472			return -EINVAL;
1473		}
1474	} else {
1475		q = qdisc_lookup(dev, tcm->tcm_handle);
1476		if (!q) {
1477			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1478			return -ENOENT;
1479		}
1480	}
1481
1482	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1483		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1484		return -EINVAL;
1485	}
1486
1487	if (n->nlmsg_type == RTM_DELQDISC) {
1488		if (!clid) {
1489			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1490			return -EINVAL;
1491		}
1492		if (q->handle == 0) {
1493			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1494			return -ENOENT;
1495		}
1496		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1497		if (err != 0)
1498			return err;
1499	} else {
1500		qdisc_notify(net, skb, n, clid, NULL, q);
1501	}
1502	return 0;
1503}
1504
1505/*
1506 * Create/change qdisc.
1507 */
1508
1509static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1510			   struct netlink_ext_ack *extack)
1511{
1512	struct net *net = sock_net(skb->sk);
1513	struct tcmsg *tcm;
1514	struct nlattr *tca[TCA_MAX + 1];
1515	struct net_device *dev;
1516	u32 clid;
1517	struct Qdisc *q, *p;
1518	int err;
1519
1520	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1521		return -EPERM;
1522
1523replay:
1524	/* Reinit, just in case something touches this. */
1525	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1526				     rtm_tca_policy, extack);
1527	if (err < 0)
1528		return err;
1529
1530	tcm = nlmsg_data(n);
1531	clid = tcm->tcm_parent;
1532	q = p = NULL;
1533
1534	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1535	if (!dev)
1536		return -ENODEV;
1537
1538
1539	if (clid) {
1540		if (clid != TC_H_ROOT) {
1541			if (clid != TC_H_INGRESS) {
1542				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1543				if (!p) {
1544					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1545					return -ENOENT;
1546				}
1547				q = qdisc_leaf(p, clid);
1548			} else if (dev_ingress_queue_create(dev)) {
1549				q = dev_ingress_queue(dev)->qdisc_sleeping;
1550			}
1551		} else {
1552			q = dev->qdisc;
1553		}
1554
1555		/* It may be default qdisc, ignore it */
1556		if (q && q->handle == 0)
1557			q = NULL;
1558
1559		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1560			if (tcm->tcm_handle) {
1561				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1562					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1563					return -EEXIST;
1564				}
1565				if (TC_H_MIN(tcm->tcm_handle)) {
1566					NL_SET_ERR_MSG(extack, "Invalid minor handle");
1567					return -EINVAL;
1568				}
1569				q = qdisc_lookup(dev, tcm->tcm_handle);
1570				if (!q)
1571					goto create_n_graft;
1572				if (n->nlmsg_flags & NLM_F_EXCL) {
1573					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1574					return -EEXIST;
1575				}
1576				if (tca[TCA_KIND] &&
1577				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1578					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1579					return -EINVAL;
1580				}
1581				if (q == p ||
1582				    (p && check_loop(q, p, 0))) {
1583					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1584					return -ELOOP;
1585				}
1586				qdisc_refcount_inc(q);
1587				goto graft;
1588			} else {
1589				if (!q)
1590					goto create_n_graft;
1591
1592				/* This magic test requires explanation.
1593				 *
1594				 *   We know that some child q is already
1595				 *   attached to this parent and have a choice:
1596				 *   either change it or create/graft a new one.
1597				 *
1598				 *   1. We are allowed to create/graft only
1599				 *   if both CREATE and REPLACE flags are set.
1600				 *
1601				 *   2. If EXCL is set, the requester meant that
1602				 *   the qdisc tcm_handle is not expected
1603				 *   to exist, so we choose create/graft too.
1604				 *
1605				 *   3. The last case is when no flags are set.
1606				 *   Alas, it is a sort of hole in the API; we
1607				 *   cannot decide what to do unambiguously.
1608				 *   For now we select create/graft if the
1609				 *   user gave a KIND which does not match the existing one.
1610				 */
1611				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1612				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1613				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1614				     (tca[TCA_KIND] &&
1615				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1616					goto create_n_graft;
1617			}
1618		}
1619	} else {
1620		if (!tcm->tcm_handle) {
1621			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1622			return -EINVAL;
1623		}
1624		q = qdisc_lookup(dev, tcm->tcm_handle);
1625	}
1626
1627	/* Change qdisc parameters */
1628	if (!q) {
1629		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1630		return -ENOENT;
1631	}
1632	if (n->nlmsg_flags & NLM_F_EXCL) {
1633		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1634		return -EEXIST;
1635	}
1636	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1637		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1638		return -EINVAL;
1639	}
1640	err = qdisc_change(q, tca, extack);
1641	if (err == 0)
1642		qdisc_notify(net, skb, n, clid, NULL, q);
1643	return err;
1644
1645create_n_graft:
1646	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1647		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1648		return -ENOENT;
1649	}
1650	if (clid == TC_H_INGRESS) {
1651		if (dev_ingress_queue(dev)) {
1652			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1653					 tcm->tcm_parent, tcm->tcm_parent,
1654					 tca, &err, extack);
1655		} else {
1656			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1657			err = -ENOENT;
1658		}
1659	} else {
1660		struct netdev_queue *dev_queue;
1661
1662		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1663			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1664		else if (p)
1665			dev_queue = p->dev_queue;
1666		else
1667			dev_queue = netdev_get_tx_queue(dev, 0);
1668
1669		q = qdisc_create(dev, dev_queue, p,
1670				 tcm->tcm_parent, tcm->tcm_handle,
1671				 tca, &err, extack);
1672	}
1673	if (q == NULL) {
1674		if (err == -EAGAIN)
1675			goto replay;
1676		return err;
1677	}
1678
1679graft:
1680	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1681	if (err) {
1682		if (q)
1683			qdisc_put(q);
1684		return err;
1685	}
1686
1687	return 0;
1688}
1689
1690static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1691			      struct netlink_callback *cb,
1692			      int *q_idx_p, int s_q_idx, bool recur,
1693			      bool dump_invisible)
1694{
1695	int ret = 0, q_idx = *q_idx_p;
1696	struct Qdisc *q;
1697	int b;
1698
1699	if (!root)
1700		return 0;
1701
1702	q = root;
1703	if (q_idx < s_q_idx) {
1704		q_idx++;
1705	} else {
1706		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1707		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1708				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1709				  RTM_NEWQDISC) <= 0)
1710			goto done;
1711		q_idx++;
1712	}
1713
1714	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
1715	 * itself has already been dumped.
1716	 *
1717	 * If we've already dumped the top-level (ingress) qdisc above and the global
1718	 * qdisc hashtable, we don't want to hit it again
1719	 */
1720	if (!qdisc_dev(root) || !recur)
1721		goto out;
1722
1723	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1724		if (q_idx < s_q_idx) {
1725			q_idx++;
1726			continue;
1727		}
1728		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1729		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1730				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1731				  RTM_NEWQDISC) <= 0)
1732			goto done;
1733		q_idx++;
1734	}
1735
1736out:
1737	*q_idx_p = q_idx;
1738	return ret;
1739done:
1740	ret = -1;
1741	goto out;
1742}
1743
1744static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1745{
1746	struct net *net = sock_net(skb->sk);
1747	int idx, q_idx;
1748	int s_idx, s_q_idx;
1749	struct net_device *dev;
1750	const struct nlmsghdr *nlh = cb->nlh;
1751	struct nlattr *tca[TCA_MAX + 1];
1752	int err;
1753
1754	s_idx = cb->args[0];
1755	s_q_idx = q_idx = cb->args[1];
1756
1757	idx = 0;
1758	ASSERT_RTNL();
1759
1760	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1761				     rtm_tca_policy, cb->extack);
1762	if (err < 0)
1763		return err;
1764
1765	for_each_netdev(net, dev) {
1766		struct netdev_queue *dev_queue;
1767
1768		if (idx < s_idx)
1769			goto cont;
1770		if (idx > s_idx)
1771			s_q_idx = 0;
1772		q_idx = 0;
1773
1774		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1775				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
1776			goto done;
1777
1778		dev_queue = dev_ingress_queue(dev);
1779		if (dev_queue &&
1780		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1781				       &q_idx, s_q_idx, false,
1782				       tca[TCA_DUMP_INVISIBLE]) < 0)
1783			goto done;
1784
1785cont:
1786		idx++;
1787	}
1788
1789done:
1790	cb->args[0] = idx;
1791	cb->args[1] = q_idx;
1792
1793	return skb->len;
1794}
1795
1796
1797
1798/************************************************
1799 *	Traffic classes manipulation.		*
1800 ************************************************/
1801
1802static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1803			  unsigned long cl,
1804			  u32 portid, u32 seq, u16 flags, int event)
1805{
1806	struct tcmsg *tcm;
1807	struct nlmsghdr  *nlh;
1808	unsigned char *b = skb_tail_pointer(skb);
1809	struct gnet_dump d;
1810	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1811
1812	cond_resched();
1813	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1814	if (!nlh)
1815		goto out_nlmsg_trim;
1816	tcm = nlmsg_data(nlh);
1817	tcm->tcm_family = AF_UNSPEC;
1818	tcm->tcm__pad1 = 0;
1819	tcm->tcm__pad2 = 0;
1820	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1821	tcm->tcm_parent = q->handle;
1822	tcm->tcm_handle = q->handle;
1823	tcm->tcm_info = 0;
1824	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1825		goto nla_put_failure;
1826	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1827		goto nla_put_failure;
1828
1829	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1830					 NULL, &d, TCA_PAD) < 0)
1831		goto nla_put_failure;
1832
1833	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1834		goto nla_put_failure;
1835
1836	if (gnet_stats_finish_copy(&d) < 0)
1837		goto nla_put_failure;
1838
1839	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1840	return skb->len;
1841
1842out_nlmsg_trim:
1843nla_put_failure:
1844	nlmsg_trim(skb, b);
1845	return -1;
1846}
1847
1848static int tclass_notify(struct net *net, struct sk_buff *oskb,
1849			 struct nlmsghdr *n, struct Qdisc *q,
1850			 unsigned long cl, int event)
1851{
1852	struct sk_buff *skb;
1853	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1854	int err = 0;
1855
1856	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1857	if (!skb)
1858		return -ENOBUFS;
1859
1860	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1861		kfree_skb(skb);
1862		return -EINVAL;
1863	}
1864
1865	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1866			     n->nlmsg_flags & NLM_F_ECHO);
1867	if (err > 0)
1868		err = 0;
1869	return err;
1870}
1871
1872static int tclass_del_notify(struct net *net,
1873			     const struct Qdisc_class_ops *cops,
1874			     struct sk_buff *oskb, struct nlmsghdr *n,
1875			     struct Qdisc *q, unsigned long cl,
1876			     struct netlink_ext_ack *extack)
1877{
1878	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1879	struct sk_buff *skb;
1880	int err = 0;
1881
1882	if (!cops->delete)
1883		return -EOPNOTSUPP;
1884
1885	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1886	if (!skb)
1887		return -ENOBUFS;
1888
1889	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1890			   RTM_DELTCLASS) < 0) {
1891		kfree_skb(skb);
1892		return -EINVAL;
1893	}
1894
1895	err = cops->delete(q, cl, extack);
1896	if (err) {
1897		kfree_skb(skb);
1898		return err;
1899	}
1900
1901	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1902			     n->nlmsg_flags & NLM_F_ECHO);
1903	if (err > 0)
1904		err = 0;
1905	return err;
1906}
1907
1908#ifdef CONFIG_NET_CLS
1909
1910struct tcf_bind_args {
1911	struct tcf_walker w;
1912	unsigned long base;
1913	unsigned long cl;
1914	u32 classid;
1915};
1916
1917static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1918{
1919	struct tcf_bind_args *a = (void *)arg;
1920
1921	if (tp->ops->bind_class) {
1922		struct Qdisc *q = tcf_block_q(tp->chain->block);
1923
1924		sch_tree_lock(q);
1925		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
1926		sch_tree_unlock(q);
1927	}
1928	return 0;
1929}
1930
1931struct tc_bind_class_args {
1932	struct qdisc_walker w;
1933	unsigned long new_cl;
1934	u32 portid;
1935	u32 clid;
1936};
1937
1938static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
1939				struct qdisc_walker *w)
1940{
1941	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
1942	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1943	struct tcf_block *block;
1944	struct tcf_chain *chain;
1945
1946	block = cops->tcf_block(q, cl, NULL);
1947	if (!block)
1948		return 0;
1949	for (chain = tcf_get_next_chain(block, NULL);
1950	     chain;
1951	     chain = tcf_get_next_chain(block, chain)) {
1952		struct tcf_proto *tp;
1953
1954		for (tp = tcf_get_next_proto(chain, NULL);
1955		     tp; tp = tcf_get_next_proto(chain, tp)) {
1956			struct tcf_bind_args arg = {};
1957
1958			arg.w.fn = tcf_node_bind;
1959			arg.classid = a->clid;
1960			arg.base = cl;
1961			arg.cl = a->new_cl;
1962			tp->ops->walk(tp, &arg.w, true);
1963		}
1964	}
1965
1966	return 0;
1967}
1968
1969static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1970			   unsigned long new_cl)
1971{
1972	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1973	struct tc_bind_class_args args = {};
1974
1975	if (!cops->tcf_block)
1976		return;
1977	args.portid = portid;
1978	args.clid = clid;
1979	args.new_cl = new_cl;
1980	args.w.fn = tc_bind_class_walker;
1981	q->ops->cl_ops->walk(q, &args.w);
1982}
1983
1984#else
1985
1986static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1987			   unsigned long new_cl)
1988{
1989}
1990
1991#endif
1992
1993static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1994			 struct netlink_ext_ack *extack)
1995{
1996	struct net *net = sock_net(skb->sk);
1997	struct tcmsg *tcm = nlmsg_data(n);
1998	struct nlattr *tca[TCA_MAX + 1];
1999	struct net_device *dev;
2000	struct Qdisc *q = NULL;
2001	const struct Qdisc_class_ops *cops;
2002	unsigned long cl = 0;
2003	unsigned long new_cl;
2004	u32 portid;
2005	u32 clid;
2006	u32 qid;
2007	int err;
2008
2009	if ((n->nlmsg_type != RTM_GETTCLASS) &&
2010	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2011		return -EPERM;
2012
2013	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2014				     rtm_tca_policy, extack);
2015	if (err < 0)
2016		return err;
2017
2018	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2019	if (!dev)
2020		return -ENODEV;
2021
2022	/*
2023	   parent == TC_H_UNSPEC - unspecified parent.
2024	   parent == TC_H_ROOT   - class is root, which has no parent.
2025	   parent == X:0	 - parent is root class.
2026	   parent == X:Y	 - parent is a node in hierarchy.
2027	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
2028
2029	   handle == 0:0	 - generate handle from kernel pool.
2030	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
2031	   handle == X:Y	 - clear.
2032	   handle == X:0	 - root class.
2033	 */
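	/* Example (added for illustration; assumes iproute2's hex handle
	 * syntax): "tc class add dev eth0 parent 1:1 classid 1:10 ..."
	 * arrives here with tcm_parent == 0x00010001 and
	 * tcm_handle == 0x00010010, so qid becomes 0x00010000; using
	 * "parent root" instead sets tcm_parent to TC_H_ROOT.
	 */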
2034
2035	/* Step 1. Determine qdisc handle X:0 */
2036
2037	portid = tcm->tcm_parent;
2038	clid = tcm->tcm_handle;
2039	qid = TC_H_MAJ(clid);
2040
2041	if (portid != TC_H_ROOT) {
2042		u32 qid1 = TC_H_MAJ(portid);
2043
2044		if (qid && qid1) {
2045			/* If both majors are known, they must be identical. */
2046			if (qid != qid1)
2047				return -EINVAL;
2048		} else if (qid1) {
2049			qid = qid1;
2050		} else if (qid == 0)
2051			qid = dev->qdisc->handle;
2052
2053		/* Now qid is a genuine qdisc handle, consistent
2054		 * with both parent and child.
2055		 *
2056		 * TC_H_MAJ(portid) may still be unspecified; complete it now.
2057		 */
2058		if (portid)
2059			portid = TC_H_MAKE(qid, portid);
2060	} else {
2061		if (qid == 0)
2062			qid = dev->qdisc->handle;
2063	}
2064
2065	/* OK. Locate qdisc */
2066	q = qdisc_lookup(dev, qid);
2067	if (!q)
2068		return -ENOENT;
2069
2070	/* And check that it supports classes */
2071	cops = q->ops->cl_ops;
2072	if (cops == NULL)
2073		return -EINVAL;
2074
2075	/* Now try to get class */
2076	if (clid == 0) {
2077		if (portid == TC_H_ROOT)
2078			clid = qid;
2079	} else
2080		clid = TC_H_MAKE(qid, clid);
2081
2082	if (clid)
2083		cl = cops->find(q, clid);
2084
2085	if (cl == 0) {
2086		err = -ENOENT;
2087		if (n->nlmsg_type != RTM_NEWTCLASS ||
2088		    !(n->nlmsg_flags & NLM_F_CREATE))
2089			goto out;
2090	} else {
2091		switch (n->nlmsg_type) {
2092		case RTM_NEWTCLASS:
2093			err = -EEXIST;
2094			if (n->nlmsg_flags & NLM_F_EXCL)
2095				goto out;
2096			break;
2097		case RTM_DELTCLASS:
2098			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2099			/* Unbind the class's filters by rebinding them to class 0 */
2100			tc_bind_tclass(q, portid, clid, 0);
2101			goto out;
2102		case RTM_GETTCLASS:
2103			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
2104			goto out;
2105		default:
2106			err = -EINVAL;
2107			goto out;
2108		}
2109	}
2110
2111	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2112		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2113		return -EOPNOTSUPP;
2114	}
2115
2116	new_cl = cl;
2117	err = -EOPNOTSUPP;
2118	if (cops->change)
2119		err = cops->change(q, clid, portid, tca, &new_cl, extack);
2120	if (err == 0) {
2121		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
2122		/* We just created a new class; we need to do the reverse binding. */
2123		if (cl != new_cl)
2124			tc_bind_tclass(q, portid, clid, new_cl);
2125	}
2126out:
2127	return err;
2128}
2129
2130struct qdisc_dump_args {
2131	struct qdisc_walker	w;
2132	struct sk_buff		*skb;
2133	struct netlink_callback	*cb;
2134};
2135
2136static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2137			    struct qdisc_walker *arg)
2138{
2139	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2140
2141	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2142			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2143			      RTM_NEWTCLASS);
2144}
2145
2146static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2147				struct tcmsg *tcm, struct netlink_callback *cb,
2148				int *t_p, int s_t)
2149{
2150	struct qdisc_dump_args arg;
2151
2152	if (tc_qdisc_dump_ignore(q, false) ||
2153	    *t_p < s_t || !q->ops->cl_ops ||
2154	    (tcm->tcm_parent &&
2155	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2156		(*t_p)++;
2157		return 0;
2158	}
2159	if (*t_p > s_t)
2160		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2161	arg.w.fn = qdisc_class_dump;
2162	arg.skb = skb;
2163	arg.cb = cb;
2164	arg.w.stop  = 0;
2165	arg.w.skip = cb->args[1];
2166	arg.w.count = 0;
2167	q->ops->cl_ops->walk(q, &arg.w);
2168	cb->args[1] = arg.w.count;
2169	if (arg.w.stop)
2170		return -1;
2171	(*t_p)++;
2172	return 0;
2173}
2174
2175static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2176			       struct tcmsg *tcm, struct netlink_callback *cb,
2177			       int *t_p, int s_t, bool recur)
2178{
2179	struct Qdisc *q;
2180	int b;
2181
2182	if (!root)
2183		return 0;
2184
2185	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2186		return -1;
2187
2188	if (!qdisc_dev(root) || !recur)
2189		return 0;
2190
2191	if (tcm->tcm_parent) {
2192		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2193		if (q && q != root &&
2194		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2195			return -1;
2196		return 0;
2197	}
2198	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2199		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2200			return -1;
2201	}
2202
2203	return 0;
2204}
2205
2206static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2207{
2208	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2209	struct net *net = sock_net(skb->sk);
2210	struct netdev_queue *dev_queue;
2211	struct net_device *dev;
2212	int t, s_t;
2213
2214	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2215		return 0;
2216	dev = dev_get_by_index(net, tcm->tcm_ifindex);
2217	if (!dev)
2218		return 0;
2219
2220	s_t = cb->args[0];
2221	t = 0;
2222
2223	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
2224		goto done;
2225
2226	dev_queue = dev_ingress_queue(dev);
2227	if (dev_queue &&
2228	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2229				&t, s_t, false) < 0)
2230		goto done;
2231
2232done:
2233	cb->args[0] = t;
2234
2235	dev_put(dev);
2236	return skb->len;
2237}
2238
2239#ifdef CONFIG_PROC_FS
2240static int psched_show(struct seq_file *seq, void *v)
2241{
2242	seq_printf(seq, "%08x %08x %08x %08x\n",
2243		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2244		   1000000,
2245		   (u32)NSEC_PER_SEC / hrtimer_resolution);
2246
2247	return 0;
2248}
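/* Example output (added for illustration; the exact values depend on
 * PSCHED_SHIFT and the hrtimer resolution): on a typical system with
 * high-resolution timers, "cat /proc/net/psched" prints something like
 *
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. 1000 ns per usec, 64 ns per psched tick, the historical constant
 * 1000000, and NSEC_PER_SEC / hrtimer_resolution.
 */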
2249
2250static int __net_init psched_net_init(struct net *net)
2251{
2252	struct proc_dir_entry *e;
2253
2254	e = proc_create_single("psched", 0, net->proc_net, psched_show);
2255	if (e == NULL)
2256		return -ENOMEM;
2257
2258	return 0;
2259}
2260
2261static void __net_exit psched_net_exit(struct net *net)
2262{
2263	remove_proc_entry("psched", net->proc_net);
2264}
2265#else
2266static int __net_init psched_net_init(struct net *net)
2267{
2268	return 0;
2269}
2270
2271static void __net_exit psched_net_exit(struct net *net)
2272{
2273}
2274#endif
2275
2276static struct pernet_operations psched_net_ops = {
2277	.init = psched_net_init,
2278	.exit = psched_net_exit,
2279};
2280
2281static int __init pktsched_init(void)
2282{
2283	int err;
2284
2285	err = register_pernet_subsys(&psched_net_ops);
2286	if (err) {
2287		pr_err("pktsched_init: "
2288		       "cannot initialize per netns operations\n");
2289		return err;
2290	}
2291
2292	register_qdisc(&pfifo_fast_ops);
2293	register_qdisc(&pfifo_qdisc_ops);
2294	register_qdisc(&bfifo_qdisc_ops);
2295	register_qdisc(&pfifo_head_drop_qdisc_ops);
2296	register_qdisc(&mq_qdisc_ops);
2297	register_qdisc(&noqueue_qdisc_ops);
2298
2299	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2300	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2301	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2302		      0);
2303	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2304	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2305	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2306		      0);
2307
2308	return 0;
2309}
2310
2311subsys_initcall(pktsched_init);