v3.5.6
   1/*
   2 * net/sched/sch_api.c	Packet scheduler API.
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10 *
  11 * Fixes:
  12 *
  13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
  14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
  15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/string.h>
  22#include <linux/errno.h>
  23#include <linux/skbuff.h>
  24#include <linux/init.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/kmod.h>
  28#include <linux/list.h>
  29#include <linux/hrtimer.h>
  30#include <linux/lockdep.h>
  31#include <linux/slab.h>
  32
  33#include <net/net_namespace.h>
  34#include <net/sock.h>
  35#include <net/netlink.h>
  36#include <net/pkt_sched.h>
  37
  38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
  39			struct nlmsghdr *n, u32 clid,
  40			struct Qdisc *old, struct Qdisc *new);
  41static int tclass_notify(struct net *net, struct sk_buff *oskb,
  42			 struct nlmsghdr *n, struct Qdisc *q,
  43			 unsigned long cl, int event);
  44
  45/*
  46
  47   Short review.
  48   -------------
  49
  50   This file consists of two interrelated parts:
  51
  52   1. queueing disciplines manager frontend.
  53   2. traffic classes manager frontend.
  54
  55   Generally, a queueing discipline ("qdisc") is a black box,
  56   which is able to enqueue packets and to dequeue them (when
  57   the device is ready to send something) in the order and at the times
  58   determined by the algorithm hidden in it.
  59
  60   qdiscs are divided into two categories:
  61   - "queues", which have no internal structure visible from outside.
  62   - "schedulers", which split all the packets into "traffic classes",
  63     using "packet classifiers" (see cls_api.c)
  64
  65   In turn, classes may have child qdiscs (as a rule, queues)
  66   attached to them, and so on.
  67
  68   The goal of the routines in this file is to translate
  69   the information supplied by the user in the form of handles
  70   into a form more intelligible to the kernel, to perform the sanity
  71   checks and the part of the work that is common to all qdiscs,
  72   and to provide rtnetlink notifications.
  73
  74   All real intelligent work is done inside qdisc modules.
  75
  76
  77
  78   Every discipline has two major routines: enqueue and dequeue.
  79
  80   ---dequeue
  81
  82   dequeue usually returns an skb to send. It is allowed to return NULL,
  83   but that does not mean the queue is empty; it just means that the
  84   discipline does not want to send anything at this time.
  85   The queue is really empty only if q->q.qlen == 0.
  86   For complicated disciplines with multiple queues, q->q is not the
  87   real packet queue, but q->q.qlen must nevertheless be valid.
  88
  89   ---enqueue
  90
  91   enqueue returns 0 if the packet was enqueued successfully.
  92   If a packet (this one or another one) was dropped, it returns
  93   a non-zero error code.
  94   NET_XMIT_DROP 	- this packet was dropped.
  95     Expected action: do not back off, but wait until the queue clears.
  96   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
  97     Expected action: back off or ignore.
  98   NET_XMIT_POLICED	- dropped by the policer.
  99     Expected action: back off or report an error to real-time apps.
 100
 101   Auxiliary routines:
 102
 103   ---peek
 104
 105   like dequeue but without removing a packet from the queue
 106
 107   ---reset
 108
 109   returns the qdisc to its initial state: purges all buffers, clears all
 110   timers and counters (except statistics), etc.
 111
 112   ---init
 113
 114   initializes a newly created qdisc.
 115
 116   ---destroy
 117
 118   destroys resources allocated by init and during the lifetime of the qdisc.
 119
 120   ---change
 121
 122   changes qdisc parameters.
 123 */
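
To make the enqueue contract above concrete, here is a minimal sketch (not part of this file) of a FIFO-style enqueue method; EXAMPLE_LIMIT and example_enqueue are assumptions for illustration only:

#define EXAMPLE_LIMIT	128	/* assumed per-qdisc packet limit for this sketch */

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Room left: queue the packet and report success (0 / NET_XMIT_SUCCESS). */
	if (likely(sch->q.qlen < EXAMPLE_LIMIT))
		return qdisc_enqueue_tail(skb, sch);

	/* No room: drop this packet and tell the caller via NET_XMIT_DROP. */
	return qdisc_drop(skb, sch);
}
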
 124
 125/* Protects list of registered TC modules. It is pure SMP lock. */
 126static DEFINE_RWLOCK(qdisc_mod_lock);
 127
 128
 129/************************************************
 130 *	Queueing disciplines manipulation.	*
 131 ************************************************/
 132
 133
 134/* The list of all installed queueing disciplines. */
 135
 136static struct Qdisc_ops *qdisc_base;
 137
  138/* Register/unregister queueing discipline */
 139
 140int register_qdisc(struct Qdisc_ops *qops)
 141{
 142	struct Qdisc_ops *q, **qp;
 143	int rc = -EEXIST;
 144
 145	write_lock(&qdisc_mod_lock);
 146	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 147		if (!strcmp(qops->id, q->id))
 148			goto out;
 149
 150	if (qops->enqueue == NULL)
 151		qops->enqueue = noop_qdisc_ops.enqueue;
 152	if (qops->peek == NULL) {
 153		if (qops->dequeue == NULL)
 154			qops->peek = noop_qdisc_ops.peek;
 155		else
 156			goto out_einval;
 157	}
 158	if (qops->dequeue == NULL)
 159		qops->dequeue = noop_qdisc_ops.dequeue;
 160
 161	if (qops->cl_ops) {
 162		const struct Qdisc_class_ops *cops = qops->cl_ops;
 163
 164		if (!(cops->get && cops->put && cops->walk && cops->leaf))
 165			goto out_einval;
 166
 167		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
 168			goto out_einval;
 169	}
 170
 171	qops->next = NULL;
 172	*qp = qops;
 173	rc = 0;
 174out:
 175	write_unlock(&qdisc_mod_lock);
 176	return rc;
 177
 178out_einval:
 179	rc = -EINVAL;
 180	goto out;
 181}
 182EXPORT_SYMBOL(register_qdisc);
 183
 184int unregister_qdisc(struct Qdisc_ops *qops)
 185{
 186	struct Qdisc_ops *q, **qp;
 187	int err = -ENOENT;
 188
 189	write_lock(&qdisc_mod_lock);
 190	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 191		if (q == qops)
 192			break;
 193	if (q) {
 194		*qp = q->next;
 195		q->next = NULL;
 196		err = 0;
 197	}
 198	write_unlock(&qdisc_mod_lock);
 199	return err;
 200}
 201EXPORT_SYMBOL(unregister_qdisc);
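
Typical use of this register/unregister pair from a scheduler module looks like the following sketch; example_qdisc_ops stands in for a real struct Qdisc_ops defined elsewhere:

static int __init example_module_init(void)
{
	/* Make the discipline available to "tc qdisc add ... example". */
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
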
 202
 203/* We know handle. Find qdisc among all qdisc's attached to device
 204   (root qdisc, all its children, children of children etc.)
 205 */
 206
 207static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 208{
 209	struct Qdisc *q;
 210
 211	if (!(root->flags & TCQ_F_BUILTIN) &&
 212	    root->handle == handle)
 213		return root;
 214
 215	list_for_each_entry(q, &root->list, list) {
 216		if (q->handle == handle)
 217			return q;
 218	}
 219	return NULL;
 220}
 221
 222static void qdisc_list_add(struct Qdisc *q)
 223{
 224	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
 225		list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
 226}
 227
 228void qdisc_list_del(struct Qdisc *q)
 229{
 230	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
 231		list_del(&q->list);
 232}
 233EXPORT_SYMBOL(qdisc_list_del);
 234
 235struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 236{
 237	struct Qdisc *q;
 238
 239	q = qdisc_match_from_root(dev->qdisc, handle);
 240	if (q)
 241		goto out;
 242
 243	if (dev_ingress_queue(dev))
 244		q = qdisc_match_from_root(
 245			dev_ingress_queue(dev)->qdisc_sleeping,
 246			handle);
 247out:
 248	return q;
 249}
 250
 251static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 252{
 253	unsigned long cl;
 254	struct Qdisc *leaf;
 255	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
 256
 257	if (cops == NULL)
 258		return NULL;
 259	cl = cops->get(p, classid);
 260
 261	if (cl == 0)
 262		return NULL;
 263	leaf = cops->leaf(p, cl);
 264	cops->put(p, cl);
 265	return leaf;
 266}
 267
 268/* Find queueing discipline by name */
 269
 270static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
 271{
 272	struct Qdisc_ops *q = NULL;
 273
 274	if (kind) {
 275		read_lock(&qdisc_mod_lock);
 276		for (q = qdisc_base; q; q = q->next) {
 277			if (nla_strcmp(kind, q->id) == 0) {
 278				if (!try_module_get(q->owner))
 279					q = NULL;
 280				break;
 281			}
 282		}
 283		read_unlock(&qdisc_mod_lock);
 284	}
 285	return q;
 286}
 287
 288static struct qdisc_rate_table *qdisc_rtab_list;
 289
 290struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
 291{
 292	struct qdisc_rate_table *rtab;
 293
 294	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 295		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
 296			rtab->refcnt++;
 297			return rtab;
 298		}
 299	}
 300
 301	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
 302	    nla_len(tab) != TC_RTAB_SIZE)
 303		return NULL;
 304
 305	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
 306	if (rtab) {
 307		rtab->rate = *r;
 308		rtab->refcnt = 1;
 309		memcpy(rtab->data, nla_data(tab), 1024);
 310		rtab->next = qdisc_rtab_list;
 311		qdisc_rtab_list = rtab;
 312	}
 313	return rtab;
 314}
 315EXPORT_SYMBOL(qdisc_get_rtab);
 316
 317void qdisc_put_rtab(struct qdisc_rate_table *tab)
 318{
 319	struct qdisc_rate_table *rtab, **rtabp;
 320
 321	if (!tab || --tab->refcnt)
 322		return;
 323
 324	for (rtabp = &qdisc_rtab_list;
 325	     (rtab = *rtabp) != NULL;
 326	     rtabp = &rtab->next) {
 327		if (rtab == tab) {
 328			*rtabp = rtab->next;
 329			kfree(rtab);
 330			return;
 331		}
 332	}
 333}
 334EXPORT_SYMBOL(qdisc_put_rtab);
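
A rate-limiting qdisc would typically pair these helpers as sketched below while parsing its netlink options; the enum, the private struct and the attribute names are assumptions for illustration only:

enum {					/* hypothetical netlink attributes */
	TCA_EXAMPLE_UNSPEC,
	TCA_EXAMPLE_RATE,		/* struct tc_ratespec */
	TCA_EXAMPLE_RTAB,		/* TC_RTAB_SIZE bytes of table data */
	__TCA_EXAMPLE_MAX
};

struct example_rate_sched {		/* hypothetical per-qdisc state */
	struct qdisc_rate_table *rtab;
};

static int example_change(struct Qdisc *sch, struct nlattr **tb)
{
	struct example_rate_sched *q = qdisc_priv(sch);
	struct tc_ratespec *rate = nla_data(tb[TCA_EXAMPLE_RATE]);
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(rate, tb[TCA_EXAMPLE_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	qdisc_put_rtab(q->rtab);	/* release any previous table */
	q->rtab = rtab;
	return 0;
}
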
 335
 336static LIST_HEAD(qdisc_stab_list);
 337static DEFINE_SPINLOCK(qdisc_stab_lock);
 338
 339static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 340	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
 341	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 342};
 343
 344static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 345{
 346	struct nlattr *tb[TCA_STAB_MAX + 1];
 347	struct qdisc_size_table *stab;
 348	struct tc_sizespec *s;
 349	unsigned int tsize = 0;
 350	u16 *tab = NULL;
 351	int err;
 352
 353	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
 354	if (err < 0)
 355		return ERR_PTR(err);
 356	if (!tb[TCA_STAB_BASE])
 357		return ERR_PTR(-EINVAL);
 358
 359	s = nla_data(tb[TCA_STAB_BASE]);
 360
 361	if (s->tsize > 0) {
 362		if (!tb[TCA_STAB_DATA])
 363			return ERR_PTR(-EINVAL);
 364		tab = nla_data(tb[TCA_STAB_DATA]);
 365		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 366	}
 367
 368	if (tsize != s->tsize || (!tab && tsize > 0))
 369		return ERR_PTR(-EINVAL);
 370
 371	spin_lock(&qdisc_stab_lock);
 372
 373	list_for_each_entry(stab, &qdisc_stab_list, list) {
 374		if (memcmp(&stab->szopts, s, sizeof(*s)))
 375			continue;
 376		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 377			continue;
 378		stab->refcnt++;
 379		spin_unlock(&qdisc_stab_lock);
 380		return stab;
 381	}
 382
 383	spin_unlock(&qdisc_stab_lock);
 384
 385	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 386	if (!stab)
 387		return ERR_PTR(-ENOMEM);
 388
 389	stab->refcnt = 1;
 390	stab->szopts = *s;
 391	if (tsize > 0)
 392		memcpy(stab->data, tab, tsize * sizeof(u16));
 393
 394	spin_lock(&qdisc_stab_lock);
 395	list_add_tail(&stab->list, &qdisc_stab_list);
 396	spin_unlock(&qdisc_stab_lock);
 397
 398	return stab;
 399}
 400
 401static void stab_kfree_rcu(struct rcu_head *head)
 402{
 403	kfree(container_of(head, struct qdisc_size_table, rcu));
 404}
 405
 406void qdisc_put_stab(struct qdisc_size_table *tab)
 407{
 408	if (!tab)
 409		return;
 410
 411	spin_lock(&qdisc_stab_lock);
 412
 413	if (--tab->refcnt == 0) {
 414		list_del(&tab->list);
 415		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
 416	}
 417
 418	spin_unlock(&qdisc_stab_lock);
 419}
 420EXPORT_SYMBOL(qdisc_put_stab);
 421
 422static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
 423{
 424	struct nlattr *nest;
 425
 426	nest = nla_nest_start(skb, TCA_STAB);
 427	if (nest == NULL)
 428		goto nla_put_failure;
 429	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
 430		goto nla_put_failure;
 431	nla_nest_end(skb, nest);
 432
 433	return skb->len;
 434
 435nla_put_failure:
 436	return -1;
 437}
 438
 439void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
 440{
 441	int pkt_len, slot;
 442
 443	pkt_len = skb->len + stab->szopts.overhead;
 444	if (unlikely(!stab->szopts.tsize))
 445		goto out;
 446
 447	slot = pkt_len + stab->szopts.cell_align;
 448	if (unlikely(slot < 0))
 449		slot = 0;
 450
 451	slot >>= stab->szopts.cell_log;
 452	if (likely(slot < stab->szopts.tsize))
 453		pkt_len = stab->data[slot];
 454	else
 455		pkt_len = stab->data[stab->szopts.tsize - 1] *
 456				(slot / stab->szopts.tsize) +
 457				stab->data[slot % stab->szopts.tsize];
 458
 459	pkt_len <<= stab->szopts.size_log;
 460out:
 461	if (unlikely(pkt_len < 1))
 462		pkt_len = 1;
 463	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 464}
 465EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
 466
 467void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
 468{
 469	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
 470		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
 471			txt, qdisc->ops->id, qdisc->handle >> 16);
 472		qdisc->flags |= TCQ_F_WARN_NONWC;
 473	}
 474}
 475EXPORT_SYMBOL(qdisc_warn_nonwc);
 476
 477static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 478{
 479	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 480						 timer);
 481
 482	qdisc_unthrottled(wd->qdisc);
 483	__netif_schedule(qdisc_root(wd->qdisc));
 484
 485	return HRTIMER_NORESTART;
 486}
 487
 488void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 489{
 490	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 491	wd->timer.function = qdisc_watchdog;
 492	wd->qdisc = qdisc;
 493}
 494EXPORT_SYMBOL(qdisc_watchdog_init);
 495
 496void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 497{
 498	ktime_t time;
 499
 500	if (test_bit(__QDISC_STATE_DEACTIVATED,
 501		     &qdisc_root_sleeping(wd->qdisc)->state))
 502		return;
 503
 504	qdisc_throttled(wd->qdisc);
 505	time = ktime_set(0, 0);
 506	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
 507	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
 508}
 509EXPORT_SYMBOL(qdisc_watchdog_schedule);
 510
 511void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 512{
 513	hrtimer_cancel(&wd->timer);
 514	qdisc_unthrottled(wd->qdisc);
 515}
 516EXPORT_SYMBOL(qdisc_watchdog_cancel);
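
A non-work-conserving qdisc typically wires these watchdog helpers up as sketched below; the private struct and the 10 ms delay are assumptions for illustration only:

struct example_wd_sched {
	struct qdisc_watchdog watchdog;
};

static int example_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct example_wd_sched *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	return 0;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_wd_sched *q = qdisc_priv(sch);

	/* Nothing may be sent yet: mark the qdisc throttled and arm the
	 * hrtimer so __netif_schedule() runs again when the gate opens.
	 */
	qdisc_watchdog_schedule(&q->watchdog,
				psched_get_time() + PSCHED_TICKS_PER_SEC / 100);
	return NULL;
}
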
 517
 518static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 519{
 520	unsigned int size = n * sizeof(struct hlist_head), i;
 521	struct hlist_head *h;
 522
 523	if (size <= PAGE_SIZE)
 524		h = kmalloc(size, GFP_KERNEL);
 525	else
 526		h = (struct hlist_head *)
 527			__get_free_pages(GFP_KERNEL, get_order(size));
 528
 529	if (h != NULL) {
 530		for (i = 0; i < n; i++)
 531			INIT_HLIST_HEAD(&h[i]);
 532	}
 533	return h;
 534}
 535
 536static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
 537{
 538	unsigned int size = n * sizeof(struct hlist_head);
 539
 540	if (size <= PAGE_SIZE)
 541		kfree(h);
 542	else
 543		free_pages((unsigned long)h, get_order(size));
 544}
 545
 546void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 547{
 548	struct Qdisc_class_common *cl;
 549	struct hlist_node *n, *next;
 550	struct hlist_head *nhash, *ohash;
 551	unsigned int nsize, nmask, osize;
 552	unsigned int i, h;
 553
 554	/* Rehash when load factor exceeds 0.75 */
 555	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
 556		return;
 557	nsize = clhash->hashsize * 2;
 558	nmask = nsize - 1;
 559	nhash = qdisc_class_hash_alloc(nsize);
 560	if (nhash == NULL)
 561		return;
 562
 563	ohash = clhash->hash;
 564	osize = clhash->hashsize;
 565
 566	sch_tree_lock(sch);
 567	for (i = 0; i < osize; i++) {
 568		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
 569			h = qdisc_class_hash(cl->classid, nmask);
 570			hlist_add_head(&cl->hnode, &nhash[h]);
 571		}
 572	}
 573	clhash->hash     = nhash;
 574	clhash->hashsize = nsize;
 575	clhash->hashmask = nmask;
 576	sch_tree_unlock(sch);
 577
 578	qdisc_class_hash_free(ohash, osize);
 579}
 580EXPORT_SYMBOL(qdisc_class_hash_grow);
 581
 582int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 583{
 584	unsigned int size = 4;
 585
 586	clhash->hash = qdisc_class_hash_alloc(size);
 587	if (clhash->hash == NULL)
 588		return -ENOMEM;
 589	clhash->hashsize  = size;
 590	clhash->hashmask  = size - 1;
 591	clhash->hashelems = 0;
 592	return 0;
 593}
 594EXPORT_SYMBOL(qdisc_class_hash_init);
 595
 596void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
 597{
 598	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
 599}
 600EXPORT_SYMBOL(qdisc_class_hash_destroy);
 601
 602void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
 603			     struct Qdisc_class_common *cl)
 604{
 605	unsigned int h;
 606
 607	INIT_HLIST_NODE(&cl->hnode);
 608	h = qdisc_class_hash(cl->classid, clhash->hashmask);
 609	hlist_add_head(&cl->hnode, &clhash->hash[h]);
 610	clhash->hashelems++;
 611}
 612EXPORT_SYMBOL(qdisc_class_hash_insert);
 613
 614void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
 615			     struct Qdisc_class_common *cl)
 616{
 617	hlist_del(&cl->hnode);
 618	clhash->hashelems--;
 619}
 620EXPORT_SYMBOL(qdisc_class_hash_remove);
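
A classful qdisc would typically use this hash as sketched below when a new class is created; struct example_class and example_add_class are hypothetical:

struct example_class {
	struct Qdisc_class_common common;	/* classid plus hash linkage */
	/* ... per-class state ... */
};

static int example_add_class(struct Qdisc *sch, struct Qdisc_class_hash *clhash,
			     u32 classid)
{
	struct example_class *cl = kzalloc(sizeof(*cl), GFP_KERNEL);

	if (cl == NULL)
		return -ENOBUFS;
	cl->common.classid = classid;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(clhash, &cl->common);
	sch_tree_unlock(sch);

	/* Rehash into a bigger table once the load factor gets too high. */
	qdisc_class_hash_grow(sch, clhash);
	return 0;
}
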
 621
  622/* Allocate a unique handle from the space managed by the kernel.
 623 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 624 */
 625static u32 qdisc_alloc_handle(struct net_device *dev)
 626{
 627	int i = 0x8000;
 628	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 629
 630	do {
 631		autohandle += TC_H_MAKE(0x10000U, 0);
 632		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
 633			autohandle = TC_H_MAKE(0x80000000U, 0);
 634		if (!qdisc_lookup(dev, autohandle))
 635			return autohandle;
 636		cond_resched();
 637	} while	(--i > 0);
 638
 639	return 0;
 640}
 641
 642void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 643{
 644	const struct Qdisc_class_ops *cops;
 645	unsigned long cl;
 646	u32 parentid;
 647
 648	if (n == 0)
 649		return;
 650	while ((parentid = sch->parent)) {
 651		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 652			return;
 653
 654		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 655		if (sch == NULL) {
 656			WARN_ON(parentid != TC_H_ROOT);
 657			return;
 658		}
 659		cops = sch->ops->cl_ops;
 660		if (cops->qlen_notify) {
 661			cl = cops->get(sch, parentid);
 662			cops->qlen_notify(sch, cl);
 663			cops->put(sch, cl);
 664		}
 665		sch->q.qlen -= n;
 666	}
 667}
 668EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 669
 670static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 671			       struct nlmsghdr *n, u32 clid,
 672			       struct Qdisc *old, struct Qdisc *new)
 673{
 674	if (new || old)
 675		qdisc_notify(net, skb, n, clid, old, new);
 676
 677	if (old)
 678		qdisc_destroy(old);
 679}
 680
 681/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 682 * to device "dev".
 683 *
 684 * When appropriate send a netlink notification using 'skb'
 685 * and "n".
 686 *
 687 * On success, destroy old qdisc.
 688 */
 689
 690static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 691		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
 692		       struct Qdisc *new, struct Qdisc *old)
 693{
 694	struct Qdisc *q = old;
 695	struct net *net = dev_net(dev);
 696	int err = 0;
 697
 698	if (parent == NULL) {
 699		unsigned int i, num_q, ingress;
 700
 701		ingress = 0;
 702		num_q = dev->num_tx_queues;
 703		if ((q && q->flags & TCQ_F_INGRESS) ||
 704		    (new && new->flags & TCQ_F_INGRESS)) {
 705			num_q = 1;
 706			ingress = 1;
 707			if (!dev_ingress_queue(dev))
 708				return -ENOENT;
 709		}
 710
 711		if (dev->flags & IFF_UP)
 712			dev_deactivate(dev);
 713
 714		if (new && new->ops->attach) {
 715			new->ops->attach(new);
 716			num_q = 0;
 717		}
 718
 719		for (i = 0; i < num_q; i++) {
 720			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
 721
 722			if (!ingress)
 723				dev_queue = netdev_get_tx_queue(dev, i);
 724
 725			old = dev_graft_qdisc(dev_queue, new);
 726			if (new && i > 0)
 727				atomic_inc(&new->refcnt);
 728
 729			if (!ingress)
 730				qdisc_destroy(old);
 731		}
 732
 733		if (!ingress) {
 734			notify_and_destroy(net, skb, n, classid,
 735					   dev->qdisc, new);
 736			if (new && !new->ops->attach)
 737				atomic_inc(&new->refcnt);
 738			dev->qdisc = new ? : &noop_qdisc;
 739		} else {
 740			notify_and_destroy(net, skb, n, classid, old, new);
 741		}
 742
 743		if (dev->flags & IFF_UP)
 744			dev_activate(dev);
 745	} else {
 746		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 747
 748		err = -EOPNOTSUPP;
 749		if (cops && cops->graft) {
 750			unsigned long cl = cops->get(parent, classid);
 751			if (cl) {
 752				err = cops->graft(parent, cl, new, &old);
 753				cops->put(parent, cl);
 754			} else
 755				err = -ENOENT;
 756		}
 757		if (!err)
 758			notify_and_destroy(net, skb, n, classid, old, new);
 759	}
 760	return err;
 761}
 762
 763/* lockdep annotation is needed for ingress; egress gets it only for name */
 764static struct lock_class_key qdisc_tx_lock;
 765static struct lock_class_key qdisc_rx_lock;
 766
 767/*
 768   Allocate and initialize new qdisc.
 769
 770   Parameters are passed via opt.
 771 */
 772
 773static struct Qdisc *
 774qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 775	     struct Qdisc *p, u32 parent, u32 handle,
 776	     struct nlattr **tca, int *errp)
 777{
 778	int err;
 779	struct nlattr *kind = tca[TCA_KIND];
 780	struct Qdisc *sch;
 781	struct Qdisc_ops *ops;
 782	struct qdisc_size_table *stab;
 783
 784	ops = qdisc_lookup_ops(kind);
 785#ifdef CONFIG_MODULES
 786	if (ops == NULL && kind != NULL) {
 787		char name[IFNAMSIZ];
 788		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
 789			/* We dropped the RTNL semaphore in order to
 790			 * perform the module load.  So, even if we
 791			 * succeeded in loading the module we have to
 792			 * tell the caller to replay the request.  We
 793			 * indicate this using -EAGAIN.
 794			 * We replay the request because the device may
 795			 * go away in the mean time.
 796			 */
 797			rtnl_unlock();
 798			request_module("sch_%s", name);
 799			rtnl_lock();
 800			ops = qdisc_lookup_ops(kind);
 801			if (ops != NULL) {
  802				/* We will call qdisc_lookup_ops again,
 803				 * so don't keep a reference.
 804				 */
 805				module_put(ops->owner);
 806				err = -EAGAIN;
 807				goto err_out;
 808			}
 809		}
 810	}
 811#endif
 812
 813	err = -ENOENT;
 814	if (ops == NULL)
 815		goto err_out;
 816
 817	sch = qdisc_alloc(dev_queue, ops);
 818	if (IS_ERR(sch)) {
 819		err = PTR_ERR(sch);
 820		goto err_out2;
 821	}
 822
 823	sch->parent = parent;
 824
 825	if (handle == TC_H_INGRESS) {
 826		sch->flags |= TCQ_F_INGRESS;
 827		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 828		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
 829	} else {
 830		if (handle == 0) {
 831			handle = qdisc_alloc_handle(dev);
 832			err = -ENOMEM;
 833			if (handle == 0)
 834				goto err_out3;
 835		}
 836		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
 837	}
 838
 839	sch->handle = handle;
 840
 841	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
 842		if (tca[TCA_STAB]) {
 843			stab = qdisc_get_stab(tca[TCA_STAB]);
 844			if (IS_ERR(stab)) {
 845				err = PTR_ERR(stab);
 846				goto err_out4;
 847			}
 848			rcu_assign_pointer(sch->stab, stab);
 849		}
 850		if (tca[TCA_RATE]) {
 851			spinlock_t *root_lock;
 852
 853			err = -EOPNOTSUPP;
 854			if (sch->flags & TCQ_F_MQROOT)
 855				goto err_out4;
 856
 857			if ((sch->parent != TC_H_ROOT) &&
 858			    !(sch->flags & TCQ_F_INGRESS) &&
 859			    (!p || !(p->flags & TCQ_F_MQROOT)))
 860				root_lock = qdisc_root_sleeping_lock(sch);
 861			else
 862				root_lock = qdisc_lock(sch);
 863
 864			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
 865						root_lock, tca[TCA_RATE]);
 866			if (err)
 867				goto err_out4;
 868		}
 869
 870		qdisc_list_add(sch);
 871
 872		return sch;
 873	}
 874err_out3:
 875	dev_put(dev);
 876	kfree((char *) sch - sch->padded);
 877err_out2:
 878	module_put(ops->owner);
 879err_out:
 880	*errp = err;
 881	return NULL;
 882
 883err_out4:
 884	/*
 885	 * Any broken qdiscs that would require a ops->reset() here?
 886	 * The qdisc was never in action so it shouldn't be necessary.
 887	 */
 888	qdisc_put_stab(rtnl_dereference(sch->stab));
 889	if (ops->destroy)
 890		ops->destroy(sch);
 891	goto err_out3;
 892}
 893
 894static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 895{
 896	struct qdisc_size_table *ostab, *stab = NULL;
 897	int err = 0;
 898
 899	if (tca[TCA_OPTIONS]) {
 900		if (sch->ops->change == NULL)
 901			return -EINVAL;
 902		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
 903		if (err)
 904			return err;
 905	}
 906
 907	if (tca[TCA_STAB]) {
 908		stab = qdisc_get_stab(tca[TCA_STAB]);
 909		if (IS_ERR(stab))
 910			return PTR_ERR(stab);
 911	}
 912
 913	ostab = rtnl_dereference(sch->stab);
 914	rcu_assign_pointer(sch->stab, stab);
 915	qdisc_put_stab(ostab);
 916
 917	if (tca[TCA_RATE]) {
 918		/* NB: ignores errors from replace_estimator
 919		   because change can't be undone. */
 920		if (sch->flags & TCQ_F_MQROOT)
 921			goto out;
 922		gen_replace_estimator(&sch->bstats, &sch->rate_est,
 923					    qdisc_root_sleeping_lock(sch),
 924					    tca[TCA_RATE]);
 925	}
 926out:
 927	return 0;
 928}
 929
 930struct check_loop_arg {
 931	struct qdisc_walker	w;
 932	struct Qdisc		*p;
 933	int			depth;
 934};
 935
 936static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
 937
 938static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
 939{
 940	struct check_loop_arg	arg;
 941
 942	if (q->ops->cl_ops == NULL)
 943		return 0;
 944
 945	arg.w.stop = arg.w.skip = arg.w.count = 0;
 946	arg.w.fn = check_loop_fn;
 947	arg.depth = depth;
 948	arg.p = p;
 949	q->ops->cl_ops->walk(q, &arg.w);
 950	return arg.w.stop ? -ELOOP : 0;
 951}
 952
 953static int
 954check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
 955{
 956	struct Qdisc *leaf;
 957	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
 958	struct check_loop_arg *arg = (struct check_loop_arg *)w;
 959
 960	leaf = cops->leaf(q, cl);
 961	if (leaf) {
 962		if (leaf == arg->p || arg->depth > 7)
 963			return -ELOOP;
 964		return check_loop(leaf, arg->p, arg->depth + 1);
 965	}
 966	return 0;
 967}
 968
 969/*
 970 * Delete/get qdisc.
 971 */
 972
 973static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 974{
 975	struct net *net = sock_net(skb->sk);
 976	struct tcmsg *tcm = NLMSG_DATA(n);
 977	struct nlattr *tca[TCA_MAX + 1];
 978	struct net_device *dev;
 979	u32 clid = tcm->tcm_parent;
 980	struct Qdisc *q = NULL;
 981	struct Qdisc *p = NULL;
 982	int err;
 983
 984	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
 985	if (!dev)
 986		return -ENODEV;
 987
 988	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
 989	if (err < 0)
 990		return err;
 991
 992	if (clid) {
 993		if (clid != TC_H_ROOT) {
 994			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
 995				p = qdisc_lookup(dev, TC_H_MAJ(clid));
 996				if (!p)
 997					return -ENOENT;
 998				q = qdisc_leaf(p, clid);
 999			} else if (dev_ingress_queue(dev)) {
1000				q = dev_ingress_queue(dev)->qdisc_sleeping;
1001			}
1002		} else {
1003			q = dev->qdisc;
1004		}
1005		if (!q)
1006			return -ENOENT;
1007
1008		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1009			return -EINVAL;
1010	} else {
1011		q = qdisc_lookup(dev, tcm->tcm_handle);
1012		if (!q)
1013			return -ENOENT;
1014	}
1015
1016	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1017		return -EINVAL;
1018
1019	if (n->nlmsg_type == RTM_DELQDISC) {
1020		if (!clid)
1021			return -EINVAL;
1022		if (q->handle == 0)
1023			return -ENOENT;
1024		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1025		if (err != 0)
1026			return err;
1027	} else {
1028		qdisc_notify(net, skb, n, clid, NULL, q);
1029	}
1030	return 0;
1031}
1032
1033/*
1034 * Create/change qdisc.
1035 */
1036
1037static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1038{
1039	struct net *net = sock_net(skb->sk);
1040	struct tcmsg *tcm;
1041	struct nlattr *tca[TCA_MAX + 1];
1042	struct net_device *dev;
1043	u32 clid;
1044	struct Qdisc *q, *p;
1045	int err;
1046
1047replay:
1048	/* Reinit, just in case something touches this. */
1049	tcm = NLMSG_DATA(n);
1050	clid = tcm->tcm_parent;
1051	q = p = NULL;
1052
1053	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1054	if (!dev)
1055		return -ENODEV;
1056
1057	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1058	if (err < 0)
1059		return err;
1060
1061	if (clid) {
1062		if (clid != TC_H_ROOT) {
1063			if (clid != TC_H_INGRESS) {
1064				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1065				if (!p)
1066					return -ENOENT;
1067				q = qdisc_leaf(p, clid);
1068			} else if (dev_ingress_queue_create(dev)) {
1069				q = dev_ingress_queue(dev)->qdisc_sleeping;
1070			}
1071		} else {
1072			q = dev->qdisc;
1073		}
1074
1075		/* It may be the default qdisc; ignore it */
1076		if (q && q->handle == 0)
1077			q = NULL;
1078
1079		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1080			if (tcm->tcm_handle) {
1081				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1082					return -EEXIST;
1083				if (TC_H_MIN(tcm->tcm_handle))
1084					return -EINVAL;
1085				q = qdisc_lookup(dev, tcm->tcm_handle);
1086				if (!q)
1087					goto create_n_graft;
1088				if (n->nlmsg_flags & NLM_F_EXCL)
1089					return -EEXIST;
1090				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1091					return -EINVAL;
1092				if (q == p ||
1093				    (p && check_loop(q, p, 0)))
1094					return -ELOOP;
1095				atomic_inc(&q->refcnt);
1096				goto graft;
1097			} else {
1098				if (!q)
1099					goto create_n_graft;
1100
1101				/* This magic test requires explanation.
1102				 *
1103				 *   We know that some child q is already
1104				 *   attached to this parent and have a choice:
1105				 *   either to change it or to create/graft a new one.
1106				 *
1107				 *   1. We are allowed to create/graft only
1108				 *   if CREATE and REPLACE flags are set.
1109				 *
1110				 *   2. If EXCL is set, the requestor meant that
1111				 *   the qdisc tcm_handle is not expected
1112				 *   to exist, so we choose create/graft too.
1113				 *
1114				 *   3. The last case is when no flags are set.
1115				 *   Alas, it is sort of a hole in the API; we
1116				 *   cannot decide what to do unambiguously.
1117				 *   For now we select create/graft if the
1118				 *   user gave a KIND which does not match the existing one.
1119				 */
1120				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1121				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1122				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1123				     (tca[TCA_KIND] &&
1124				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1125					goto create_n_graft;
1126			}
1127		}
1128	} else {
1129		if (!tcm->tcm_handle)
1130			return -EINVAL;
1131		q = qdisc_lookup(dev, tcm->tcm_handle);
1132	}
1133
1134	/* Change qdisc parameters */
1135	if (q == NULL)
1136		return -ENOENT;
1137	if (n->nlmsg_flags & NLM_F_EXCL)
1138		return -EEXIST;
1139	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1140		return -EINVAL;
1141	err = qdisc_change(q, tca);
1142	if (err == 0)
1143		qdisc_notify(net, skb, n, clid, NULL, q);
1144	return err;
1145
1146create_n_graft:
1147	if (!(n->nlmsg_flags & NLM_F_CREATE))
1148		return -ENOENT;
1149	if (clid == TC_H_INGRESS) {
1150		if (dev_ingress_queue(dev))
1151			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1152					 tcm->tcm_parent, tcm->tcm_parent,
1153					 tca, &err);
1154		else
1155			err = -ENOENT;
1156	} else {
1157		struct netdev_queue *dev_queue;
1158
1159		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1160			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1161		else if (p)
1162			dev_queue = p->dev_queue;
1163		else
1164			dev_queue = netdev_get_tx_queue(dev, 0);
1165
1166		q = qdisc_create(dev, dev_queue, p,
1167				 tcm->tcm_parent, tcm->tcm_handle,
1168				 tca, &err);
1169	}
1170	if (q == NULL) {
1171		if (err == -EAGAIN)
1172			goto replay;
1173		return err;
1174	}
1175
1176graft:
1177	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1178	if (err) {
1179		if (q)
1180			qdisc_destroy(q);
1181		return err;
1182	}
1183
1184	return 0;
1185}
1186
1187static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1188			 u32 pid, u32 seq, u16 flags, int event)
1189{
1190	struct tcmsg *tcm;
1191	struct nlmsghdr  *nlh;
1192	unsigned char *b = skb_tail_pointer(skb);
1193	struct gnet_dump d;
1194	struct qdisc_size_table *stab;
1195
1196	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1197	tcm = NLMSG_DATA(nlh);
1198	tcm->tcm_family = AF_UNSPEC;
1199	tcm->tcm__pad1 = 0;
1200	tcm->tcm__pad2 = 0;
1201	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1202	tcm->tcm_parent = clid;
1203	tcm->tcm_handle = q->handle;
1204	tcm->tcm_info = atomic_read(&q->refcnt);
1205	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1206		goto nla_put_failure;
1207	if (q->ops->dump && q->ops->dump(q, skb) < 0)
1208		goto nla_put_failure;
1209	q->qstats.qlen = q->q.qlen;
1210
1211	stab = rtnl_dereference(q->stab);
1212	if (stab && qdisc_dump_stab(skb, stab) < 0)
1213		goto nla_put_failure;
1214
1215	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1216					 qdisc_root_sleeping_lock(q), &d) < 0)
1217		goto nla_put_failure;
1218
1219	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1220		goto nla_put_failure;
1221
1222	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
1223	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1224	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
1225		goto nla_put_failure;
1226
1227	if (gnet_stats_finish_copy(&d) < 0)
1228		goto nla_put_failure;
1229
1230	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1231	return skb->len;
1232
1233nlmsg_failure:
1234nla_put_failure:
1235	nlmsg_trim(skb, b);
1236	return -1;
1237}
1238
1239static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1240{
1241	return (q->flags & TCQ_F_BUILTIN) ? true : false;
1242}
1243
1244static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1245			struct nlmsghdr *n, u32 clid,
1246			struct Qdisc *old, struct Qdisc *new)
1247{
1248	struct sk_buff *skb;
1249	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1250
1251	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1252	if (!skb)
1253		return -ENOBUFS;
1254
1255	if (old && !tc_qdisc_dump_ignore(old)) {
1256		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
1257				  0, RTM_DELQDISC) < 0)
1258			goto err_out;
1259	}
1260	if (new && !tc_qdisc_dump_ignore(new)) {
1261		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
1262				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1263			goto err_out;
1264	}
1265
1266	if (skb->len)
1267		return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
1268				      n->nlmsg_flags & NLM_F_ECHO);
1269
1270err_out:
1271	kfree_skb(skb);
1272	return -EINVAL;
1273}
1274
1275static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1276			      struct netlink_callback *cb,
1277			      int *q_idx_p, int s_q_idx)
1278{
1279	int ret = 0, q_idx = *q_idx_p;
1280	struct Qdisc *q;
1281
1282	if (!root)
1283		return 0;
1284
1285	q = root;
1286	if (q_idx < s_q_idx) {
1287		q_idx++;
1288	} else {
1289		if (!tc_qdisc_dump_ignore(q) &&
1290		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1291				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1292			goto done;
1293		q_idx++;
1294	}
1295	list_for_each_entry(q, &root->list, list) {
1296		if (q_idx < s_q_idx) {
1297			q_idx++;
1298			continue;
1299		}
1300		if (!tc_qdisc_dump_ignore(q) &&
1301		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1302				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1303			goto done;
1304		q_idx++;
1305	}
1306
1307out:
1308	*q_idx_p = q_idx;
1309	return ret;
1310done:
1311	ret = -1;
1312	goto out;
1313}
1314
1315static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1316{
1317	struct net *net = sock_net(skb->sk);
1318	int idx, q_idx;
1319	int s_idx, s_q_idx;
1320	struct net_device *dev;
1321
1322	s_idx = cb->args[0];
1323	s_q_idx = q_idx = cb->args[1];
1324
1325	rcu_read_lock();
1326	idx = 0;
1327	for_each_netdev_rcu(net, dev) {
1328		struct netdev_queue *dev_queue;
1329
1330		if (idx < s_idx)
1331			goto cont;
1332		if (idx > s_idx)
1333			s_q_idx = 0;
1334		q_idx = 0;
1335
1336		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1337			goto done;
1338
1339		dev_queue = dev_ingress_queue(dev);
1340		if (dev_queue &&
1341		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1342				       &q_idx, s_q_idx) < 0)
1343			goto done;
1344
1345cont:
1346		idx++;
1347	}
1348
1349done:
1350	rcu_read_unlock();
1351
1352	cb->args[0] = idx;
1353	cb->args[1] = q_idx;
1354
1355	return skb->len;
1356}
1357
1358
1359
1360/************************************************
1361 *	Traffic classes manipulation.		*
1362 ************************************************/
1363
1364
1365
1366static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1367{
1368	struct net *net = sock_net(skb->sk);
1369	struct tcmsg *tcm = NLMSG_DATA(n);
1370	struct nlattr *tca[TCA_MAX + 1];
1371	struct net_device *dev;
1372	struct Qdisc *q = NULL;
1373	const struct Qdisc_class_ops *cops;
1374	unsigned long cl = 0;
1375	unsigned long new_cl;
1376	u32 pid = tcm->tcm_parent;
1377	u32 clid = tcm->tcm_handle;
1378	u32 qid = TC_H_MAJ(clid);
1379	int err;
1380
1381	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1382	if (!dev)
1383		return -ENODEV;
1384
1385	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1386	if (err < 0)
1387		return err;
1388
1389	/*
1390	   parent == TC_H_UNSPEC - unspecified parent.
1391	   parent == TC_H_ROOT   - class is root, which has no parent.
1392	   parent == X:0	 - parent is root class.
1393	   parent == X:Y	 - parent is a node in hierarchy.
1394	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
1395
1396	   handle == 0:0	 - generate handle from kernel pool.
1397	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
1398	   handle == X:Y	 - clear.
1399	   handle == X:0	 - root class.
1400	 */
1401
1402	/* Step 1. Determine qdisc handle X:0 */
1403
1404	if (pid != TC_H_ROOT) {
1405		u32 qid1 = TC_H_MAJ(pid);
1406
1407		if (qid && qid1) {
1408			/* If both majors are known, they must be identical. */
1409			if (qid != qid1)
1410				return -EINVAL;
1411		} else if (qid1) {
1412			qid = qid1;
1413		} else if (qid == 0)
1414			qid = dev->qdisc->handle;
1415
1416		/* Now qid is a genuine qdisc handle consistent
1417		 * with both parent and child.
1418		 *
1419		 * TC_H_MAJ(pid) still may be unspecified, complete it now.
1420		 */
1421		if (pid)
1422			pid = TC_H_MAKE(qid, pid);
1423	} else {
1424		if (qid == 0)
1425			qid = dev->qdisc->handle;
1426	}
1427
1428	/* OK. Locate qdisc */
1429	q = qdisc_lookup(dev, qid);
1430	if (!q)
1431		return -ENOENT;
1432
1433	/* And check that it supports classes */
1434	cops = q->ops->cl_ops;
1435	if (cops == NULL)
1436		return -EINVAL;
1437
1438	/* Now try to get class */
1439	if (clid == 0) {
1440		if (pid == TC_H_ROOT)
1441			clid = qid;
1442	} else
1443		clid = TC_H_MAKE(qid, clid);
1444
1445	if (clid)
1446		cl = cops->get(q, clid);
1447
1448	if (cl == 0) {
1449		err = -ENOENT;
1450		if (n->nlmsg_type != RTM_NEWTCLASS ||
1451		    !(n->nlmsg_flags & NLM_F_CREATE))
1452			goto out;
1453	} else {
1454		switch (n->nlmsg_type) {
1455		case RTM_NEWTCLASS:
1456			err = -EEXIST;
1457			if (n->nlmsg_flags & NLM_F_EXCL)
1458				goto out;
1459			break;
1460		case RTM_DELTCLASS:
1461			err = -EOPNOTSUPP;
1462			if (cops->delete)
1463				err = cops->delete(q, cl);
1464			if (err == 0)
1465				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1466			goto out;
1467		case RTM_GETTCLASS:
1468			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1469			goto out;
1470		default:
1471			err = -EINVAL;
1472			goto out;
1473		}
1474	}
1475
1476	new_cl = cl;
1477	err = -EOPNOTSUPP;
1478	if (cops->change)
1479		err = cops->change(q, clid, pid, tca, &new_cl);
1480	if (err == 0)
1481		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1482
1483out:
1484	if (cl)
1485		cops->put(q, cl);
1486
1487	return err;
1488}
1489
1490
1491static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1492			  unsigned long cl,
1493			  u32 pid, u32 seq, u16 flags, int event)
1494{
1495	struct tcmsg *tcm;
1496	struct nlmsghdr  *nlh;
1497	unsigned char *b = skb_tail_pointer(skb);
1498	struct gnet_dump d;
1499	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1500
1501	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1502	tcm = NLMSG_DATA(nlh);
1503	tcm->tcm_family = AF_UNSPEC;
1504	tcm->tcm__pad1 = 0;
1505	tcm->tcm__pad2 = 0;
1506	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1507	tcm->tcm_parent = q->handle;
1508	tcm->tcm_handle = q->handle;
1509	tcm->tcm_info = 0;
1510	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1511		goto nla_put_failure;
1512	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1513		goto nla_put_failure;
1514
1515	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1516					 qdisc_root_sleeping_lock(q), &d) < 0)
1517		goto nla_put_failure;
1518
1519	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1520		goto nla_put_failure;
1521
1522	if (gnet_stats_finish_copy(&d) < 0)
1523		goto nla_put_failure;
1524
1525	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1526	return skb->len;
1527
1528nlmsg_failure:
1529nla_put_failure:
1530	nlmsg_trim(skb, b);
1531	return -1;
1532}
1533
1534static int tclass_notify(struct net *net, struct sk_buff *oskb,
1535			 struct nlmsghdr *n, struct Qdisc *q,
1536			 unsigned long cl, int event)
1537{
1538	struct sk_buff *skb;
1539	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1540
1541	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1542	if (!skb)
1543		return -ENOBUFS;
1544
1545	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1546		kfree_skb(skb);
1547		return -EINVAL;
1548	}
1549
1550	return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
1551			      n->nlmsg_flags & NLM_F_ECHO);
1552}
1553
1554struct qdisc_dump_args {
1555	struct qdisc_walker	w;
1556	struct sk_buff		*skb;
1557	struct netlink_callback	*cb;
1558};
1559
1560static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1561{
1562	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1563
1564	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1565			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1566}
1567
1568static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1569				struct tcmsg *tcm, struct netlink_callback *cb,
1570				int *t_p, int s_t)
1571{
1572	struct qdisc_dump_args arg;
1573
1574	if (tc_qdisc_dump_ignore(q) ||
1575	    *t_p < s_t || !q->ops->cl_ops ||
1576	    (tcm->tcm_parent &&
1577	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1578		(*t_p)++;
1579		return 0;
1580	}
1581	if (*t_p > s_t)
1582		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1583	arg.w.fn = qdisc_class_dump;
1584	arg.skb = skb;
1585	arg.cb = cb;
1586	arg.w.stop  = 0;
1587	arg.w.skip = cb->args[1];
1588	arg.w.count = 0;
1589	q->ops->cl_ops->walk(q, &arg.w);
1590	cb->args[1] = arg.w.count;
1591	if (arg.w.stop)
1592		return -1;
1593	(*t_p)++;
1594	return 0;
1595}
1596
1597static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1598			       struct tcmsg *tcm, struct netlink_callback *cb,
1599			       int *t_p, int s_t)
1600{
1601	struct Qdisc *q;
1602
1603	if (!root)
1604		return 0;
1605
1606	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1607		return -1;
1608
1609	list_for_each_entry(q, &root->list, list) {
1610		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1611			return -1;
1612	}
1613
1614	return 0;
1615}
1616
1617static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1618{
1619	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
1620	struct net *net = sock_net(skb->sk);
1621	struct netdev_queue *dev_queue;
1622	struct net_device *dev;
1623	int t, s_t;
1624
1625	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1626		return 0;
1627	dev = dev_get_by_index(net, tcm->tcm_ifindex);
1628	if (!dev)
1629		return 0;
1630
1631	s_t = cb->args[0];
1632	t = 0;
1633
1634	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1635		goto done;
1636
1637	dev_queue = dev_ingress_queue(dev);
1638	if (dev_queue &&
1639	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1640				&t, s_t) < 0)
1641		goto done;
1642
1643done:
1644	cb->args[0] = t;
1645
1646	dev_put(dev);
1647	return skb->len;
1648}
1649
1650/* Main classifier routine: scans classifier chain attached
1651 * to this qdisc, (optionally) tests for protocol and asks
1652 * specific classifiers.
1653 */
1654int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
1655		       struct tcf_result *res)
1656{
1657	__be16 protocol = skb->protocol;
1658	int err;
1659
1660	for (; tp; tp = tp->next) {
1661		if (tp->protocol != protocol &&
1662		    tp->protocol != htons(ETH_P_ALL))
1663			continue;
1664		err = tp->classify(skb, tp, res);
1665
1666		if (err >= 0) {
1667#ifdef CONFIG_NET_CLS_ACT
1668			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1669				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1670#endif
1671			return err;
1672		}
1673	}
1674	return -1;
1675}
1676EXPORT_SYMBOL(tc_classify_compat);
1677
1678int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1679		struct tcf_result *res)
1680{
1681	int err = 0;
1682#ifdef CONFIG_NET_CLS_ACT
1683	const struct tcf_proto *otp = tp;
1684reclassify:
1685#endif
1686
1687	err = tc_classify_compat(skb, tp, res);
1688#ifdef CONFIG_NET_CLS_ACT
1689	if (err == TC_ACT_RECLASSIFY) {
1690		u32 verd = G_TC_VERD(skb->tc_verd);
1691		tp = otp;
1692
1693		if (verd++ >= MAX_REC_LOOP) {
1694			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
1695					       tp->q->ops->id,
1696					       tp->prio & 0xffff,
1697					       ntohs(tp->protocol));
1698			return TC_ACT_SHOT;
1699		}
1700		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1701		goto reclassify;
1702	}
1703#endif
1704	return err;
1705}
1706EXPORT_SYMBOL(tc_classify);
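
From a classful qdisc's enqueue path these entry points are typically used roughly as sketched below; example_classify and the filter_list argument are assumptions standing in for a real per-qdisc filter chain:

static u32 example_classify(struct sk_buff *skb, struct tcf_proto *filter_list)
{
	struct tcf_result res;
	int verdict = tc_classify(skb, filter_list, &res);

	if (verdict < 0)
		return 0;			/* no filter matched */
#ifdef CONFIG_NET_CLS_ACT
	switch (verdict) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
	case TC_ACT_SHOT:
		return 0;			/* packet consumed by an action */
	}
#endif
	return TC_H_MIN(res.classid);		/* minor id of the matched class */
}
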
1707
1708void tcf_destroy(struct tcf_proto *tp)
1709{
1710	tp->ops->destroy(tp);
1711	module_put(tp->ops->owner);
1712	kfree(tp);
1713}
1714
1715void tcf_destroy_chain(struct tcf_proto **fl)
1716{
1717	struct tcf_proto *tp;
1718
1719	while ((tp = *fl) != NULL) {
1720		*fl = tp->next;
1721		tcf_destroy(tp);
1722	}
1723}
1724EXPORT_SYMBOL(tcf_destroy_chain);
1725
1726#ifdef CONFIG_PROC_FS
1727static int psched_show(struct seq_file *seq, void *v)
1728{
1729	struct timespec ts;
1730
1731	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1732	seq_printf(seq, "%08x %08x %08x %08x\n",
1733		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1734		   1000000,
1735		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1736
1737	return 0;
1738}
1739
1740static int psched_open(struct inode *inode, struct file *file)
1741{
1742	return single_open(file, psched_show, NULL);
1743}
1744
1745static const struct file_operations psched_fops = {
1746	.owner = THIS_MODULE,
1747	.open = psched_open,
1748	.read  = seq_read,
1749	.llseek = seq_lseek,
1750	.release = single_release,
1751};
1752
1753static int __net_init psched_net_init(struct net *net)
1754{
1755	struct proc_dir_entry *e;
1756
1757	e = proc_net_fops_create(net, "psched", 0, &psched_fops);
1758	if (e == NULL)
1759		return -ENOMEM;
1760
1761	return 0;
1762}
1763
1764static void __net_exit psched_net_exit(struct net *net)
1765{
1766	proc_net_remove(net, "psched");
1767}
1768#else
1769static int __net_init psched_net_init(struct net *net)
1770{
1771	return 0;
1772}
1773
1774static void __net_exit psched_net_exit(struct net *net)
1775{
1776}
1777#endif
1778
1779static struct pernet_operations psched_net_ops = {
1780	.init = psched_net_init,
1781	.exit = psched_net_exit,
1782};
1783
1784static int __init pktsched_init(void)
1785{
1786	int err;
1787
1788	err = register_pernet_subsys(&psched_net_ops);
1789	if (err) {
1790		pr_err("pktsched_init: "
1791		       "cannot initialize per netns operations\n");
1792		return err;
1793	}
1794
1795	register_qdisc(&pfifo_qdisc_ops);
1796	register_qdisc(&bfifo_qdisc_ops);
1797	register_qdisc(&pfifo_head_drop_qdisc_ops);
1798	register_qdisc(&mq_qdisc_ops);
1799
1800	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1801	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1802	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1803	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1804	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1805	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1806
1807	return 0;
1808}
1809
1810subsys_initcall(pktsched_init);
v4.6
   1/*
   2 * net/sched/sch_api.c	Packet scheduler API.
   3 *
   4 *		This program is free software; you can redistribute it and/or
   5 *		modify it under the terms of the GNU General Public License
   6 *		as published by the Free Software Foundation; either version
   7 *		2 of the License, or (at your option) any later version.
   8 *
   9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10 *
  11 * Fixes:
  12 *
  13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
  14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
  15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/string.h>
  22#include <linux/errno.h>
  23#include <linux/skbuff.h>
  24#include <linux/init.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/kmod.h>
  28#include <linux/list.h>
  29#include <linux/hrtimer.h>
  30#include <linux/lockdep.h>
  31#include <linux/slab.h>
  32
  33#include <net/net_namespace.h>
  34#include <net/sock.h>
  35#include <net/netlink.h>
  36#include <net/pkt_sched.h>
  37
  38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
  39			struct nlmsghdr *n, u32 clid,
  40			struct Qdisc *old, struct Qdisc *new);
  41static int tclass_notify(struct net *net, struct sk_buff *oskb,
  42			 struct nlmsghdr *n, struct Qdisc *q,
  43			 unsigned long cl, int event);
  44
  45/*
  46
  47   Short review.
  48   -------------
  49
  50   This file consists of two interrelated parts:
  51
  52   1. queueing disciplines manager frontend.
  53   2. traffic classes manager frontend.
  54
  55   Generally, a queueing discipline ("qdisc") is a black box,
  56   which is able to enqueue packets and to dequeue them (when
  57   the device is ready to send something) in the order and at the times
  58   determined by the algorithm hidden in it.
  59
  60   qdiscs are divided into two categories:
  61   - "queues", which have no internal structure visible from outside.
  62   - "schedulers", which split all the packets into "traffic classes",
  63     using "packet classifiers" (see cls_api.c)
  64
  65   In turn, classes may have child qdiscs (as a rule, queues)
  66   attached to them, and so on.
  67
  68   The goal of the routines in this file is to translate
  69   the information supplied by the user in the form of handles
  70   into a form more intelligible to the kernel, to perform the sanity
  71   checks and the part of the work that is common to all qdiscs,
  72   and to provide rtnetlink notifications.
  73
  74   All real intelligent work is done inside qdisc modules.
  75
  76
  77
  78   Every discipline has two major routines: enqueue and dequeue.
  79
  80   ---dequeue
  81
  82   dequeue usually returns an skb to send. It is allowed to return NULL,
  83   but that does not mean the queue is empty; it just means that the
  84   discipline does not want to send anything at this time.
  85   The queue is really empty only if q->q.qlen == 0.
  86   For complicated disciplines with multiple queues, q->q is not the
  87   real packet queue, but q->q.qlen must nevertheless be valid.
  88
  89   ---enqueue
  90
  91   enqueue returns 0 if the packet was enqueued successfully.
  92   If a packet (this one or another one) was dropped, it returns
  93   a non-zero error code.
  94   NET_XMIT_DROP 	- this packet was dropped.
  95     Expected action: do not back off, but wait until the queue clears.
  96   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
  97     Expected action: back off or ignore.
  98   NET_XMIT_POLICED	- dropped by the policer.
  99     Expected action: back off or report an error to real-time apps.
 100
 101   Auxiliary routines:
 102
 103   ---peek
 104
 105   like dequeue but without removing a packet from the queue
 106
 107   ---reset
 108
 109   returns the qdisc to its initial state: purges all buffers, clears all
 110   timers and counters (except statistics), etc.
 111
 112   ---init
 113
 114   initializes a newly created qdisc.
 115
 116   ---destroy
 117
 118   destroys resources allocated by init and during the lifetime of the qdisc.
 119
 120   ---change
 121
 122   changes qdisc parameters.
 123 */
 124
 125/* Protects list of registered TC modules. It is pure SMP lock. */
 126static DEFINE_RWLOCK(qdisc_mod_lock);
 127
 128
 129/************************************************
 130 *	Queueing disciplines manipulation.	*
 131 ************************************************/
 132
 133
 134/* The list of all installed queueing disciplines. */
 135
 136static struct Qdisc_ops *qdisc_base;
 137
 138/* Register/unregister queueing discipline */
 139
 140int register_qdisc(struct Qdisc_ops *qops)
 141{
 142	struct Qdisc_ops *q, **qp;
 143	int rc = -EEXIST;
 144
 145	write_lock(&qdisc_mod_lock);
 146	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 147		if (!strcmp(qops->id, q->id))
 148			goto out;
 149
 150	if (qops->enqueue == NULL)
 151		qops->enqueue = noop_qdisc_ops.enqueue;
 152	if (qops->peek == NULL) {
 153		if (qops->dequeue == NULL)
 154			qops->peek = noop_qdisc_ops.peek;
 155		else
 156			goto out_einval;
 157	}
 158	if (qops->dequeue == NULL)
 159		qops->dequeue = noop_qdisc_ops.dequeue;
 160
 161	if (qops->cl_ops) {
 162		const struct Qdisc_class_ops *cops = qops->cl_ops;
 163
 164		if (!(cops->get && cops->put && cops->walk && cops->leaf))
 165			goto out_einval;
 166
 167		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
 168			goto out_einval;
 169	}
 170
 171	qops->next = NULL;
 172	*qp = qops;
 173	rc = 0;
 174out:
 175	write_unlock(&qdisc_mod_lock);
 176	return rc;
 177
 178out_einval:
 179	rc = -EINVAL;
 180	goto out;
 181}
 182EXPORT_SYMBOL(register_qdisc);
 183
 184int unregister_qdisc(struct Qdisc_ops *qops)
 185{
 186	struct Qdisc_ops *q, **qp;
 187	int err = -ENOENT;
 188
 189	write_lock(&qdisc_mod_lock);
 190	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 191		if (q == qops)
 192			break;
 193	if (q) {
 194		*qp = q->next;
 195		q->next = NULL;
 196		err = 0;
 197	}
 198	write_unlock(&qdisc_mod_lock);
 199	return err;
 200}
 201EXPORT_SYMBOL(unregister_qdisc);
 202
 203/* Get default qdisc if not otherwise specified */
 204void qdisc_get_default(char *name, size_t len)
 205{
 206	read_lock(&qdisc_mod_lock);
 207	strlcpy(name, default_qdisc_ops->id, len);
 208	read_unlock(&qdisc_mod_lock);
 209}
 210
 211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
 212{
 213	struct Qdisc_ops *q = NULL;
 214
 215	for (q = qdisc_base; q; q = q->next) {
 216		if (!strcmp(name, q->id)) {
 217			if (!try_module_get(q->owner))
 218				q = NULL;
 219			break;
 220		}
 221	}
 222
 223	return q;
 224}
 225
 226/* Set new default qdisc to use */
 227int qdisc_set_default(const char *name)
 228{
 229	const struct Qdisc_ops *ops;
 230
 231	if (!capable(CAP_NET_ADMIN))
 232		return -EPERM;
 233
 234	write_lock(&qdisc_mod_lock);
 235	ops = qdisc_lookup_default(name);
 236	if (!ops) {
 237		/* Not found, drop lock and try to load module */
 238		write_unlock(&qdisc_mod_lock);
 239		request_module("sch_%s", name);
 240		write_lock(&qdisc_mod_lock);
 241
 242		ops = qdisc_lookup_default(name);
 243	}
 244
 245	if (ops) {
 246		/* Set new default */
 247		module_put(default_qdisc_ops->owner);
 248		default_qdisc_ops = ops;
 249	}
 250	write_unlock(&qdisc_mod_lock);
 251
 252	return ops ? 0 : -ENOENT;
 253}
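    /* This is normally reached via the net.core.default_qdisc sysctl,
     * e.g. "sysctl -w net.core.default_qdisc=fq_codel"; the selected
     * default_qdisc_ops is then used when default qdiscs are attached to
     * newly activated devices.
     */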
 254
 255/* We know the handle. Find the qdisc among all qdiscs attached to the device
 256 * (root qdisc, all its children, children of children, etc.)
 257 * Note: caller either holds the rtnl lock or rcu_read_lock()
 258 */
 259
 260static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 261{
 262	struct Qdisc *q;
 263
 264	if (!(root->flags & TCQ_F_BUILTIN) &&
 265	    root->handle == handle)
 266		return root;
 267
 268	list_for_each_entry_rcu(q, &root->list, list) {
 269		if (q->handle == handle)
 270			return q;
 271	}
 272	return NULL;
 273}
 274
 275void qdisc_list_add(struct Qdisc *q)
 276{
 277	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
 278		struct Qdisc *root = qdisc_dev(q)->qdisc;
 279
 280		WARN_ON_ONCE(root == &noop_qdisc);
 281		ASSERT_RTNL();
 282		list_add_tail_rcu(&q->list, &root->list);
 283	}
 284}
 285EXPORT_SYMBOL(qdisc_list_add);
 286
 287void qdisc_list_del(struct Qdisc *q)
 288{
 289	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
 290		ASSERT_RTNL();
 291		list_del_rcu(&q->list);
 292	}
 293}
 294EXPORT_SYMBOL(qdisc_list_del);
 295
 296struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 297{
 298	struct Qdisc *q;
 299
 300	q = qdisc_match_from_root(dev->qdisc, handle);
 301	if (q)
 302		goto out;
 303
 304	if (dev_ingress_queue(dev))
 305		q = qdisc_match_from_root(
 306			dev_ingress_queue(dev)->qdisc_sleeping,
 307			handle);
 308out:
 309	return q;
 310}
 311
 312static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 313{
 314	unsigned long cl;
 315	struct Qdisc *leaf;
 316	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
 317
 318	if (cops == NULL)
 319		return NULL;
 320	cl = cops->get(p, classid);
 321
 322	if (cl == 0)
 323		return NULL;
 324	leaf = cops->leaf(p, cl);
 325	cops->put(p, cl);
 326	return leaf;
 327}
 328
 329/* Find queueing discipline by name */
 330
 331static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
 332{
 333	struct Qdisc_ops *q = NULL;
 334
 335	if (kind) {
 336		read_lock(&qdisc_mod_lock);
 337		for (q = qdisc_base; q; q = q->next) {
 338			if (nla_strcmp(kind, q->id) == 0) {
 339				if (!try_module_get(q->owner))
 340					q = NULL;
 341				break;
 342			}
 343		}
 344		read_unlock(&qdisc_mod_lock);
 345	}
 346	return q;
 347}
 348
 349/* The linklayer setting was not transferred from older iproute2
 350 * versions, and the rate table lookup system has been dropped from
 351 * the kernel. To stay backward compatible with older iproute2 tc
 352 * utils, we detect the linklayer setting by checking whether the rate
 353 * table was modified.
 354 *
 355 * For linklayer ATM table entries, the rate table is aligned to
 356 * 48 bytes, thus some table entries will contain the same value.  The
 357 * mpu (min packet unit) is also encoded into the old rate table, thus
 358 * starting from the mpu, we find the low and high table entries
 359 * mapping to this cell.  If these entries contain the same value,
 360 * then the rate table has been modified for linklayer ATM.
 361 *
 362 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
 363 * then rounding up to the next cell, computing the table entry one
 364 * below it, and comparing the two.
 365 */
 366static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 367{
 368	int low       = roundup(r->mpu, 48);
 369	int high      = roundup(low+1, 48);
 370	int cell_low  = low >> r->cell_log;
 371	int cell_high = (high >> r->cell_log) - 1;
 372
 373	/* rtab is too inaccurate at rates > 100Mbit/s */
 374	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
 375		pr_debug("TC linklayer: Giving up ATM detection\n");
 376		return TC_LINKLAYER_ETHERNET;
 377	}
 378
 379	if ((cell_high > cell_low) && (cell_high < 256)
 380	    && (rtab[cell_low] == rtab[cell_high])) {
 381		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
 382			 cell_low, cell_high, rtab[cell_high]);
 383		return TC_LINKLAYER_ATM;
 384	}
 385	return TC_LINKLAYER_ETHERNET;
 386}
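    /* Worked example of the check above: with mpu == 0 and cell_log == 3,
     * low = 0 and high = 48, so cell_low = 0 and cell_high = 5.  An
     * ATM-adjusted table charges every size from 1 to 48 bytes as a single
     * 53-byte cell, so rtab[0] == rtab[5] and TC_LINKLAYER_ATM is detected;
     * an unmodified (Ethernet) table keeps growing with packet size, so the
     * two entries differ.
     */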
 387
 388static struct qdisc_rate_table *qdisc_rtab_list;
 389
 390struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
 391{
 392	struct qdisc_rate_table *rtab;
 393
 394	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
 395	    nla_len(tab) != TC_RTAB_SIZE)
 396		return NULL;
 397
 398	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 399		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
 400		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
 401			rtab->refcnt++;
 402			return rtab;
 403		}
 404	}
 405
 406	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
 407	if (rtab) {
 408		rtab->rate = *r;
 409		rtab->refcnt = 1;
 410		memcpy(rtab->data, nla_data(tab), 1024);
 411		if (r->linklayer == TC_LINKLAYER_UNAWARE)
 412			r->linklayer = __detect_linklayer(r, rtab->data);
 413		rtab->next = qdisc_rtab_list;
 414		qdisc_rtab_list = rtab;
 415	}
 416	return rtab;
 417}
 418EXPORT_SYMBOL(qdisc_get_rtab);
 419
 420void qdisc_put_rtab(struct qdisc_rate_table *tab)
 421{
 422	struct qdisc_rate_table *rtab, **rtabp;
 423
 424	if (!tab || --tab->refcnt)
 425		return;
 426
 427	for (rtabp = &qdisc_rtab_list;
 428	     (rtab = *rtabp) != NULL;
 429	     rtabp = &rtab->next) {
 430		if (rtab == tab) {
 431			*rtabp = rtab->next;
 432			kfree(rtab);
 433			return;
 434		}
 435	}
 436}
 437EXPORT_SYMBOL(qdisc_put_rtab);
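    /* Typical usage (sketch): a rate-limited qdisc looks its table up in
     * ->init()/->change() and releases it again from ->destroy().
     * TCA_FOO_RTAB stands for whatever rate-table attribute the caller
     * defines, and qopt for the caller's option struct.
     *
     *	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_FOO_RTAB]);
     *	if (rtab == NULL)
     *		return -EINVAL;
     *	...
     *	qdisc_put_rtab(rtab);
     */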
 438
 439static LIST_HEAD(qdisc_stab_list);
 440static DEFINE_SPINLOCK(qdisc_stab_lock);
 441
 442static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 443	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
 444	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 445};
 446
 447static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 448{
 449	struct nlattr *tb[TCA_STAB_MAX + 1];
 450	struct qdisc_size_table *stab;
 451	struct tc_sizespec *s;
 452	unsigned int tsize = 0;
 453	u16 *tab = NULL;
 454	int err;
 455
 456	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
 457	if (err < 0)
 458		return ERR_PTR(err);
 459	if (!tb[TCA_STAB_BASE])
 460		return ERR_PTR(-EINVAL);
 461
 462	s = nla_data(tb[TCA_STAB_BASE]);
 463
 464	if (s->tsize > 0) {
 465		if (!tb[TCA_STAB_DATA])
 466			return ERR_PTR(-EINVAL);
 467		tab = nla_data(tb[TCA_STAB_DATA]);
 468		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 469	}
 470
 471	if (tsize != s->tsize || (!tab && tsize > 0))
 472		return ERR_PTR(-EINVAL);
 473
 474	spin_lock(&qdisc_stab_lock);
 475
 476	list_for_each_entry(stab, &qdisc_stab_list, list) {
 477		if (memcmp(&stab->szopts, s, sizeof(*s)))
 478			continue;
 479		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 480			continue;
 481		stab->refcnt++;
 482		spin_unlock(&qdisc_stab_lock);
 483		return stab;
 484	}
 485
 486	spin_unlock(&qdisc_stab_lock);
 487
 488	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 489	if (!stab)
 490		return ERR_PTR(-ENOMEM);
 491
 492	stab->refcnt = 1;
 493	stab->szopts = *s;
 494	if (tsize > 0)
 495		memcpy(stab->data, tab, tsize * sizeof(u16));
 496
 497	spin_lock(&qdisc_stab_lock);
 498	list_add_tail(&stab->list, &qdisc_stab_list);
 499	spin_unlock(&qdisc_stab_lock);
 500
 501	return stab;
 502}
 503
 504static void stab_kfree_rcu(struct rcu_head *head)
 505{
 506	kfree(container_of(head, struct qdisc_size_table, rcu));
 507}
 508
 509void qdisc_put_stab(struct qdisc_size_table *tab)
 510{
 511	if (!tab)
 512		return;
 513
 514	spin_lock(&qdisc_stab_lock);
 515
 516	if (--tab->refcnt == 0) {
 517		list_del(&tab->list);
 518		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
 519	}
 520
 521	spin_unlock(&qdisc_stab_lock);
 522}
 523EXPORT_SYMBOL(qdisc_put_stab);
 524
 525static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
 526{
 527	struct nlattr *nest;
 528
 529	nest = nla_nest_start(skb, TCA_STAB);
 530	if (nest == NULL)
 531		goto nla_put_failure;
 532	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
 533		goto nla_put_failure;
 534	nla_nest_end(skb, nest);
 535
 536	return skb->len;
 537
 538nla_put_failure:
 539	return -1;
 540}
 541
 542void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
 543{
 544	int pkt_len, slot;
 545
 546	pkt_len = skb->len + stab->szopts.overhead;
 547	if (unlikely(!stab->szopts.tsize))
 548		goto out;
 549
 550	slot = pkt_len + stab->szopts.cell_align;
 551	if (unlikely(slot < 0))
 552		slot = 0;
 553
 554	slot >>= stab->szopts.cell_log;
 555	if (likely(slot < stab->szopts.tsize))
 556		pkt_len = stab->data[slot];
 557	else
 558		pkt_len = stab->data[stab->szopts.tsize - 1] *
 559				(slot / stab->szopts.tsize) +
 560				stab->data[slot % stab->szopts.tsize];
 561
 562	pkt_len <<= stab->szopts.size_log;
 563out:
 564	if (unlikely(pkt_len < 1))
 565		pkt_len = 1;
 566	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 567}
 568EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
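    /* In short, with a size table in effect the lookup above computes
     *
     *	slot    = (skb->len + overhead + cell_align) >> cell_log;
     *	pkt_len = stab->data[slot] << size_log;
     *
     * extrapolating linearly from the last entry when slot >= tsize.
     * Illustrative numbers: overhead = 0, cell_align = 0, cell_log = 6 and
     * data[] holding multiples of 64 make a 100-byte skb account as
     * data[1] = 128 bytes.
     */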
 569
 570void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
 571{
 572	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
 573		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
 574			txt, qdisc->ops->id, qdisc->handle >> 16);
 575		qdisc->flags |= TCQ_F_WARN_NONWC;
 576	}
 577}
 578EXPORT_SYMBOL(qdisc_warn_nonwc);
 579
 580static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 581{
 582	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 583						 timer);
 584
 585	rcu_read_lock();
 586	qdisc_unthrottled(wd->qdisc);
 587	__netif_schedule(qdisc_root(wd->qdisc));
 588	rcu_read_unlock();
 589
 590	return HRTIMER_NORESTART;
 591}
 592
 593void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 594{
 595	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 596	wd->timer.function = qdisc_watchdog;
 597	wd->qdisc = qdisc;
 598}
 599EXPORT_SYMBOL(qdisc_watchdog_init);
 600
 601void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
 602{
 603	if (test_bit(__QDISC_STATE_DEACTIVATED,
 604		     &qdisc_root_sleeping(wd->qdisc)->state))
 605		return;
 606
 607	if (throttle)
 608		qdisc_throttled(wd->qdisc);
 609
 610	hrtimer_start(&wd->timer,
 611		      ns_to_ktime(expires),
 612		      HRTIMER_MODE_ABS_PINNED);
 613}
 614EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
 615
 616void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 617{
 618	hrtimer_cancel(&wd->timer);
 619	qdisc_unthrottled(wd->qdisc);
 620}
 621EXPORT_SYMBOL(qdisc_watchdog_cancel);
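    /* Typical usage (sketch): a shaping qdisc embeds a struct qdisc_watchdog
     * in its private data (q and next_send_ns are hypothetical here),
     * initializes it from ->init(), arms it from ->dequeue() when the next
     * packet is not yet allowed to go out, and cancels it from
     * ->reset()/->destroy():
     *
     *	qdisc_watchdog_init(&q->watchdog, sch);
     *	...
     *	qdisc_watchdog_schedule_ns(&q->watchdog, next_send_ns, true);
     *	return NULL;	// nothing may be sent before next_send_ns
     *	...
     *	qdisc_watchdog_cancel(&q->watchdog);
     */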
 622
 623static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 624{
 625	unsigned int size = n * sizeof(struct hlist_head), i;
 626	struct hlist_head *h;
 627
 628	if (size <= PAGE_SIZE)
 629		h = kmalloc(size, GFP_KERNEL);
 630	else
 631		h = (struct hlist_head *)
 632			__get_free_pages(GFP_KERNEL, get_order(size));
 633
 634	if (h != NULL) {
 635		for (i = 0; i < n; i++)
 636			INIT_HLIST_HEAD(&h[i]);
 637	}
 638	return h;
 639}
 640
 641static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
 642{
 643	unsigned int size = n * sizeof(struct hlist_head);
 644
 645	if (size <= PAGE_SIZE)
 646		kfree(h);
 647	else
 648		free_pages((unsigned long)h, get_order(size));
 649}
 650
 651void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 652{
 653	struct Qdisc_class_common *cl;
 654	struct hlist_node *next;
 655	struct hlist_head *nhash, *ohash;
 656	unsigned int nsize, nmask, osize;
 657	unsigned int i, h;
 658
 659	/* Rehash when load factor exceeds 0.75 */
 660	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
 661		return;
 662	nsize = clhash->hashsize * 2;
 663	nmask = nsize - 1;
 664	nhash = qdisc_class_hash_alloc(nsize);
 665	if (nhash == NULL)
 666		return;
 667
 668	ohash = clhash->hash;
 669	osize = clhash->hashsize;
 670
 671	sch_tree_lock(sch);
 672	for (i = 0; i < osize; i++) {
 673		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
 674			h = qdisc_class_hash(cl->classid, nmask);
 675			hlist_add_head(&cl->hnode, &nhash[h]);
 676		}
 677	}
 678	clhash->hash     = nhash;
 679	clhash->hashsize = nsize;
 680	clhash->hashmask = nmask;
 681	sch_tree_unlock(sch);
 682
 683	qdisc_class_hash_free(ohash, osize);
 684}
 685EXPORT_SYMBOL(qdisc_class_hash_grow);
 686
 687int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 688{
 689	unsigned int size = 4;
 690
 691	clhash->hash = qdisc_class_hash_alloc(size);
 692	if (clhash->hash == NULL)
 693		return -ENOMEM;
 694	clhash->hashsize  = size;
 695	clhash->hashmask  = size - 1;
 696	clhash->hashelems = 0;
 697	return 0;
 698}
 699EXPORT_SYMBOL(qdisc_class_hash_init);
 700
 701void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
 702{
 703	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
 704}
 705EXPORT_SYMBOL(qdisc_class_hash_destroy);
 706
 707void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
 708			     struct Qdisc_class_common *cl)
 709{
 710	unsigned int h;
 711
 712	INIT_HLIST_NODE(&cl->hnode);
 713	h = qdisc_class_hash(cl->classid, clhash->hashmask);
 714	hlist_add_head(&cl->hnode, &clhash->hash[h]);
 715	clhash->hashelems++;
 716}
 717EXPORT_SYMBOL(qdisc_class_hash_insert);
 718
 719void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
 720			     struct Qdisc_class_common *cl)
 721{
 722	hlist_del(&cl->hnode);
 723	clhash->hashelems--;
 724}
 725EXPORT_SYMBOL(qdisc_class_hash_remove);
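    /* Typical usage (sketch): a classful qdisc embeds a
     * struct Qdisc_class_common at the start of each class and keeps a
     * struct Qdisc_class_hash in its private data.  example_class and q
     * are hypothetical:
     *
     *	err = qdisc_class_hash_init(&q->clhash);		// in ->init()
     *
     *	cl->common.classid = classid;				// new class
     *	qdisc_class_hash_insert(&q->clhash, &cl->common);
     *	qdisc_class_hash_grow(sch, &q->clhash);
     *
     *	// lookup by classid, e.g. from ->get():
     *	c = qdisc_class_find(&q->clhash, classid);
     *	cl = c ? container_of(c, struct example_class, common) : NULL;
     */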
 726
 727/* Allocate a unique handle from the space managed by the kernel.
 728 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 729 */
 730static u32 qdisc_alloc_handle(struct net_device *dev)
 731{
 732	int i = 0x8000;
 733	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 734
 735	do {
 736		autohandle += TC_H_MAKE(0x10000U, 0);
 737		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
 738			autohandle = TC_H_MAKE(0x80000000U, 0);
 739		if (!qdisc_lookup(dev, autohandle))
 740			return autohandle;
 741		cond_resched();
 742	} while	(--i > 0);
 743
 744	return 0;
 745}
 746
 747void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
 748			       unsigned int len)
 749{
 750	const struct Qdisc_class_ops *cops;
 751	unsigned long cl;
 752	u32 parentid;
 753	int drops;
 754
 755	if (n == 0 && len == 0)
 756		return;
 757	drops = max_t(int, n, 0);
 758	rcu_read_lock();
 759	while ((parentid = sch->parent)) {
 760		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 761			break;
 762
 763		if (sch->flags & TCQ_F_NOPARENT)
 764			break;
 765		/* TODO: perform the search on a per txq basis */
 766		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 767		if (sch == NULL) {
 768			WARN_ON_ONCE(parentid != TC_H_ROOT);
 769			break;
 770		}
 771		cops = sch->ops->cl_ops;
 772		if (cops->qlen_notify) {
 773			cl = cops->get(sch, parentid);
 774			cops->qlen_notify(sch, cl);
 775			cops->put(sch, cl);
 776		}
 777		sch->q.qlen -= n;
 778		sch->qstats.backlog -= len;
 779		__qdisc_qstats_drop(sch, drops);
 780	}
 781	rcu_read_unlock();
 782}
 783EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
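    /* Example: a child qdisc that has just dropped one 1500-byte packet
     * calls qdisc_tree_reduce_backlog(sch, 1, 1500) so that the qlen and
     * backlog counters of all its ancestors stay consistent with the leaf.
     */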
 784
 785static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 786			       struct nlmsghdr *n, u32 clid,
 787			       struct Qdisc *old, struct Qdisc *new)
 788{
 789	if (new || old)
 790		qdisc_notify(net, skb, n, clid, old, new);
 791
 792	if (old)
 793		qdisc_destroy(old);
 794}
 795
 796/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 797 * to device "dev".
 798 *
 799 * When appropriate send a netlink notification using 'skb'
 800 * and "n".
 801 *
 802 * On success, destroy old qdisc.
 803 */
 804
 805static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 806		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
 807		       struct Qdisc *new, struct Qdisc *old)
 808{
 809	struct Qdisc *q = old;
 810	struct net *net = dev_net(dev);
 811	int err = 0;
 812
 813	if (parent == NULL) {
 814		unsigned int i, num_q, ingress;
 815
 816		ingress = 0;
 817		num_q = dev->num_tx_queues;
 818		if ((q && q->flags & TCQ_F_INGRESS) ||
 819		    (new && new->flags & TCQ_F_INGRESS)) {
 820			num_q = 1;
 821			ingress = 1;
 822			if (!dev_ingress_queue(dev))
 823				return -ENOENT;
 824		}
 825
 826		if (dev->flags & IFF_UP)
 827			dev_deactivate(dev);
 828
 829		if (new && new->ops->attach)
 830			goto skip;
 831
 832		for (i = 0; i < num_q; i++) {
 833			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
 834
 835			if (!ingress)
 836				dev_queue = netdev_get_tx_queue(dev, i);
 837
 838			old = dev_graft_qdisc(dev_queue, new);
 839			if (new && i > 0)
 840				atomic_inc(&new->refcnt);
 841
 842			if (!ingress)
 843				qdisc_destroy(old);
 844		}
 845
 846skip:
 847		if (!ingress) {
 848			notify_and_destroy(net, skb, n, classid,
 849					   dev->qdisc, new);
 850			if (new && !new->ops->attach)
 851				atomic_inc(&new->refcnt);
 852			dev->qdisc = new ? : &noop_qdisc;
 853
 854			if (new && new->ops->attach)
 855				new->ops->attach(new);
 856		} else {
 857			notify_and_destroy(net, skb, n, classid, old, new);
 858		}
 859
 860		if (dev->flags & IFF_UP)
 861			dev_activate(dev);
 862	} else {
 863		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 864
 865		err = -EOPNOTSUPP;
 866		if (cops && cops->graft) {
 867			unsigned long cl = cops->get(parent, classid);
 868			if (cl) {
 869				err = cops->graft(parent, cl, new, &old);
 870				cops->put(parent, cl);
 871			} else
 872				err = -ENOENT;
 873		}
 874		if (!err)
 875			notify_and_destroy(net, skb, n, classid, old, new);
 876	}
 877	return err;
 878}
 879
 880/* lockdep annotation is needed for ingress; egress gets it only for name */
 881static struct lock_class_key qdisc_tx_lock;
 882static struct lock_class_key qdisc_rx_lock;
 883
 884/*
 885   Allocate and initialize a new qdisc.
 886
 887   Parameters are passed via opt.
 888 */
 889
 890static struct Qdisc *
 891qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 892	     struct Qdisc *p, u32 parent, u32 handle,
 893	     struct nlattr **tca, int *errp)
 894{
 895	int err;
 896	struct nlattr *kind = tca[TCA_KIND];
 897	struct Qdisc *sch;
 898	struct Qdisc_ops *ops;
 899	struct qdisc_size_table *stab;
 900
 901	ops = qdisc_lookup_ops(kind);
 902#ifdef CONFIG_MODULES
 903	if (ops == NULL && kind != NULL) {
 904		char name[IFNAMSIZ];
 905		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
 906			/* We dropped the RTNL semaphore in order to
 907			 * perform the module load.  So, even if we
 908			 * succeeded in loading the module we have to
 909			 * tell the caller to replay the request.  We
 910			 * indicate this using -EAGAIN.
 911			 * We replay the request because the device may
 912			 * go away in the meantime.
 913			 */
 914			rtnl_unlock();
 915			request_module("sch_%s", name);
 916			rtnl_lock();
 917			ops = qdisc_lookup_ops(kind);
 918			if (ops != NULL) {
 919				/* We will try qdisc_lookup_ops again,
 920				 * so don't keep a reference.
 921				 */
 922				module_put(ops->owner);
 923				err = -EAGAIN;
 924				goto err_out;
 925			}
 926		}
 927	}
 928#endif
 929
 930	err = -ENOENT;
 931	if (ops == NULL)
 932		goto err_out;
 933
 934	sch = qdisc_alloc(dev_queue, ops);
 935	if (IS_ERR(sch)) {
 936		err = PTR_ERR(sch);
 937		goto err_out2;
 938	}
 939
 940	sch->parent = parent;
 941
 942	if (handle == TC_H_INGRESS) {
 943		sch->flags |= TCQ_F_INGRESS;
 944		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 945		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
 946	} else {
 947		if (handle == 0) {
 948			handle = qdisc_alloc_handle(dev);
 949			err = -ENOMEM;
 950			if (handle == 0)
 951				goto err_out3;
 952		}
 953		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
 954		if (!netif_is_multiqueue(dev))
 955			sch->flags |= TCQ_F_ONETXQUEUE;
 956	}
 957
 958	sch->handle = handle;
 959
 960	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
 961		if (qdisc_is_percpu_stats(sch)) {
 962			sch->cpu_bstats =
 963				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
 964			if (!sch->cpu_bstats)
 965				goto err_out4;
 966
 967			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 968			if (!sch->cpu_qstats)
 969				goto err_out4;
 970		}
 971
 972		if (tca[TCA_STAB]) {
 973			stab = qdisc_get_stab(tca[TCA_STAB]);
 974			if (IS_ERR(stab)) {
 975				err = PTR_ERR(stab);
 976				goto err_out4;
 977			}
 978			rcu_assign_pointer(sch->stab, stab);
 979		}
 980		if (tca[TCA_RATE]) {
 981			spinlock_t *root_lock;
 982
 983			err = -EOPNOTSUPP;
 984			if (sch->flags & TCQ_F_MQROOT)
 985				goto err_out4;
 986
 987			if ((sch->parent != TC_H_ROOT) &&
 988			    !(sch->flags & TCQ_F_INGRESS) &&
 989			    (!p || !(p->flags & TCQ_F_MQROOT)))
 990				root_lock = qdisc_root_sleeping_lock(sch);
 991			else
 992				root_lock = qdisc_lock(sch);
 993
 994			err = gen_new_estimator(&sch->bstats,
 995						sch->cpu_bstats,
 996						&sch->rate_est,
 997						root_lock,
 998						tca[TCA_RATE]);
 999			if (err)
1000				goto err_out4;
1001		}
1002
1003		qdisc_list_add(sch);
1004
1005		return sch;
1006	}
1007err_out3:
1008	dev_put(dev);
1009	kfree((char *) sch - sch->padded);
1010err_out2:
1011	module_put(ops->owner);
1012err_out:
1013	*errp = err;
1014	return NULL;
1015
1016err_out4:
1017	free_percpu(sch->cpu_bstats);
1018	free_percpu(sch->cpu_qstats);
1019	/*
1020	 * Any broken qdiscs that would require a ops->reset() here?
1021	 * The qdisc was never in action so it shouldn't be necessary.
1022	 */
1023	qdisc_put_stab(rtnl_dereference(sch->stab));
1024	if (ops->destroy)
1025		ops->destroy(sch);
1026	goto err_out3;
1027}
1028
1029static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1030{
1031	struct qdisc_size_table *ostab, *stab = NULL;
1032	int err = 0;
1033
1034	if (tca[TCA_OPTIONS]) {
1035		if (sch->ops->change == NULL)
1036			return -EINVAL;
1037		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1038		if (err)
1039			return err;
1040	}
1041
1042	if (tca[TCA_STAB]) {
1043		stab = qdisc_get_stab(tca[TCA_STAB]);
1044		if (IS_ERR(stab))
1045			return PTR_ERR(stab);
1046	}
1047
1048	ostab = rtnl_dereference(sch->stab);
1049	rcu_assign_pointer(sch->stab, stab);
1050	qdisc_put_stab(ostab);
1051
1052	if (tca[TCA_RATE]) {
1053		/* NB: ignores errors from replace_estimator
1054		   because change can't be undone. */
1055		if (sch->flags & TCQ_F_MQROOT)
1056			goto out;
1057		gen_replace_estimator(&sch->bstats,
1058				      sch->cpu_bstats,
1059				      &sch->rate_est,
1060				      qdisc_root_sleeping_lock(sch),
1061				      tca[TCA_RATE]);
1062	}
1063out:
1064	return 0;
1065}
1066
1067struct check_loop_arg {
1068	struct qdisc_walker	w;
1069	struct Qdisc		*p;
1070	int			depth;
1071};
1072
1073static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1074
1075static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1076{
1077	struct check_loop_arg	arg;
1078
1079	if (q->ops->cl_ops == NULL)
1080		return 0;
1081
1082	arg.w.stop = arg.w.skip = arg.w.count = 0;
1083	arg.w.fn = check_loop_fn;
1084	arg.depth = depth;
1085	arg.p = p;
1086	q->ops->cl_ops->walk(q, &arg.w);
1087	return arg.w.stop ? -ELOOP : 0;
1088}
1089
1090static int
1091check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1092{
1093	struct Qdisc *leaf;
1094	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1095	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1096
1097	leaf = cops->leaf(q, cl);
1098	if (leaf) {
1099		if (leaf == arg->p || arg->depth > 7)
1100			return -ELOOP;
1101		return check_loop(leaf, arg->p, arg->depth + 1);
1102	}
1103	return 0;
1104}
1105
1106/*
1107 * Delete/get qdisc.
1108 */
1109
1110static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1111{
1112	struct net *net = sock_net(skb->sk);
1113	struct tcmsg *tcm = nlmsg_data(n);
1114	struct nlattr *tca[TCA_MAX + 1];
1115	struct net_device *dev;
1116	u32 clid;
1117	struct Qdisc *q = NULL;
1118	struct Qdisc *p = NULL;
1119	int err;
1120
1121	if ((n->nlmsg_type != RTM_GETQDISC) &&
1122	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1123		return -EPERM;
1124
1125	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1126	if (err < 0)
1127		return err;
1128
1129	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1130	if (!dev)
1131		return -ENODEV;
1132
1133	clid = tcm->tcm_parent;
1134	if (clid) {
1135		if (clid != TC_H_ROOT) {
1136			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1137				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1138				if (!p)
1139					return -ENOENT;
1140				q = qdisc_leaf(p, clid);
1141			} else if (dev_ingress_queue(dev)) {
1142				q = dev_ingress_queue(dev)->qdisc_sleeping;
1143			}
1144		} else {
1145			q = dev->qdisc;
1146		}
1147		if (!q)
1148			return -ENOENT;
1149
1150		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1151			return -EINVAL;
1152	} else {
1153		q = qdisc_lookup(dev, tcm->tcm_handle);
1154		if (!q)
1155			return -ENOENT;
1156	}
1157
1158	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1159		return -EINVAL;
1160
1161	if (n->nlmsg_type == RTM_DELQDISC) {
1162		if (!clid)
1163			return -EINVAL;
1164		if (q->handle == 0)
1165			return -ENOENT;
1166		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1167		if (err != 0)
1168			return err;
1169	} else {
1170		qdisc_notify(net, skb, n, clid, NULL, q);
1171	}
1172	return 0;
1173}
1174
1175/*
1176 * Create/change qdisc.
1177 */
1178
1179static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1180{
1181	struct net *net = sock_net(skb->sk);
1182	struct tcmsg *tcm;
1183	struct nlattr *tca[TCA_MAX + 1];
1184	struct net_device *dev;
1185	u32 clid;
1186	struct Qdisc *q, *p;
1187	int err;
1188
1189	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1190		return -EPERM;
1191
1192replay:
1193	/* Reinit, just in case something touches this. */
1194	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1195	if (err < 0)
1196		return err;
1197
1198	tcm = nlmsg_data(n);
1199	clid = tcm->tcm_parent;
1200	q = p = NULL;
1201
1202	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1203	if (!dev)
1204		return -ENODEV;
1205
1206
1207	if (clid) {
1208		if (clid != TC_H_ROOT) {
1209			if (clid != TC_H_INGRESS) {
1210				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1211				if (!p)
1212					return -ENOENT;
1213				q = qdisc_leaf(p, clid);
1214			} else if (dev_ingress_queue_create(dev)) {
1215				q = dev_ingress_queue(dev)->qdisc_sleeping;
1216			}
1217		} else {
1218			q = dev->qdisc;
1219		}
1220
1221		/* It may be the default qdisc; ignore it */
1222		if (q && q->handle == 0)
1223			q = NULL;
1224
1225		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1226			if (tcm->tcm_handle) {
1227				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1228					return -EEXIST;
1229				if (TC_H_MIN(tcm->tcm_handle))
1230					return -EINVAL;
1231				q = qdisc_lookup(dev, tcm->tcm_handle);
1232				if (!q)
1233					goto create_n_graft;
1234				if (n->nlmsg_flags & NLM_F_EXCL)
1235					return -EEXIST;
1236				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1237					return -EINVAL;
1238				if (q == p ||
1239				    (p && check_loop(q, p, 0)))
1240					return -ELOOP;
1241				atomic_inc(&q->refcnt);
1242				goto graft;
1243			} else {
1244				if (!q)
1245					goto create_n_graft;
1246
1247				/* This magic test requires explanation.
1248				 *
1249				 *   We know that some child q is already
1250				 *   attached to this parent and have a choice:
1251				 *   either to change it or to create/graft a new one.
1252				 *
1253				 *   1. We are allowed to create/graft only
1254				 *   if both the CREATE and REPLACE flags are set.
1255				 *
1256				 *   2. If EXCL is set, the requestor meant that
1257				 *   the qdisc tcm_handle is not expected
1258				 *   to exist, so we choose create/graft too.
1259				 *
1260				 *   3. The last case is when no flags are set.
1261				 *   Alas, this is a sort of hole in the API; we
1262				 *   cannot decide what to do unambiguously.
1263				 *   For now we select create/graft if the
1264				 *   user gave a KIND which does not match the existing one.
1265				 */
1266				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1267				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1268				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1269				     (tca[TCA_KIND] &&
1270				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1271					goto create_n_graft;
1272			}
1273		}
1274	} else {
1275		if (!tcm->tcm_handle)
1276			return -EINVAL;
1277		q = qdisc_lookup(dev, tcm->tcm_handle);
1278	}
1279
1280	/* Change qdisc parameters */
1281	if (q == NULL)
1282		return -ENOENT;
1283	if (n->nlmsg_flags & NLM_F_EXCL)
1284		return -EEXIST;
1285	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1286		return -EINVAL;
1287	err = qdisc_change(q, tca);
1288	if (err == 0)
1289		qdisc_notify(net, skb, n, clid, NULL, q);
1290	return err;
1291
1292create_n_graft:
1293	if (!(n->nlmsg_flags & NLM_F_CREATE))
1294		return -ENOENT;
1295	if (clid == TC_H_INGRESS) {
1296		if (dev_ingress_queue(dev))
1297			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1298					 tcm->tcm_parent, tcm->tcm_parent,
1299					 tca, &err);
1300		else
1301			err = -ENOENT;
1302	} else {
1303		struct netdev_queue *dev_queue;
1304
1305		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1306			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1307		else if (p)
1308			dev_queue = p->dev_queue;
1309		else
1310			dev_queue = netdev_get_tx_queue(dev, 0);
1311
1312		q = qdisc_create(dev, dev_queue, p,
1313				 tcm->tcm_parent, tcm->tcm_handle,
1314				 tca, &err);
1315	}
1316	if (q == NULL) {
1317		if (err == -EAGAIN)
1318			goto replay;
1319		return err;
1320	}
1321
1322graft:
1323	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1324	if (err) {
1325		if (q)
1326			qdisc_destroy(q);
1327		return err;
1328	}
1329
1330	return 0;
1331}
1332
1333static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1334			 u32 portid, u32 seq, u16 flags, int event)
1335{
1336	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
1337	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
1338	struct tcmsg *tcm;
1339	struct nlmsghdr  *nlh;
1340	unsigned char *b = skb_tail_pointer(skb);
1341	struct gnet_dump d;
1342	struct qdisc_size_table *stab;
1343	__u32 qlen;
1344
1345	cond_resched();
1346	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1347	if (!nlh)
1348		goto out_nlmsg_trim;
1349	tcm = nlmsg_data(nlh);
1350	tcm->tcm_family = AF_UNSPEC;
1351	tcm->tcm__pad1 = 0;
1352	tcm->tcm__pad2 = 0;
1353	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1354	tcm->tcm_parent = clid;
1355	tcm->tcm_handle = q->handle;
1356	tcm->tcm_info = atomic_read(&q->refcnt);
1357	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1358		goto nla_put_failure;
1359	if (q->ops->dump && q->ops->dump(q, skb) < 0)
1360		goto nla_put_failure;
1361	qlen = q->q.qlen;
1362
1363	stab = rtnl_dereference(q->stab);
1364	if (stab && qdisc_dump_stab(skb, stab) < 0)
1365		goto nla_put_failure;
1366
1367	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1368					 qdisc_root_sleeping_lock(q), &d) < 0)
1369		goto nla_put_failure;
1370
1371	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1372		goto nla_put_failure;
1373
1374	if (qdisc_is_percpu_stats(q)) {
1375		cpu_bstats = q->cpu_bstats;
1376		cpu_qstats = q->cpu_qstats;
1377	}
1378
1379	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
1380	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1381	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
1382		goto nla_put_failure;
1383
1384	if (gnet_stats_finish_copy(&d) < 0)
1385		goto nla_put_failure;
1386
1387	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1388	return skb->len;
1389
1390out_nlmsg_trim:
1391nla_put_failure:
1392	nlmsg_trim(skb, b);
1393	return -1;
1394}
1395
1396static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1397{
1398	return (q->flags & TCQ_F_BUILTIN) ? true : false;
1399}
1400
1401static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1402			struct nlmsghdr *n, u32 clid,
1403			struct Qdisc *old, struct Qdisc *new)
1404{
1405	struct sk_buff *skb;
1406	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1407
1408	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1409	if (!skb)
1410		return -ENOBUFS;
1411
1412	if (old && !tc_qdisc_dump_ignore(old)) {
1413		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1414				  0, RTM_DELQDISC) < 0)
1415			goto err_out;
1416	}
1417	if (new && !tc_qdisc_dump_ignore(new)) {
1418		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1419				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1420			goto err_out;
1421	}
1422
1423	if (skb->len)
1424		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1425				      n->nlmsg_flags & NLM_F_ECHO);
1426
1427err_out:
1428	kfree_skb(skb);
1429	return -EINVAL;
1430}
1431
1432static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1433			      struct netlink_callback *cb,
1434			      int *q_idx_p, int s_q_idx)
1435{
1436	int ret = 0, q_idx = *q_idx_p;
1437	struct Qdisc *q;
1438
1439	if (!root)
1440		return 0;
1441
1442	q = root;
1443	if (q_idx < s_q_idx) {
1444		q_idx++;
1445	} else {
1446		if (!tc_qdisc_dump_ignore(q) &&
1447		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1448				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1449			goto done;
1450		q_idx++;
1451	}
1452	list_for_each_entry(q, &root->list, list) {
1453		if (q_idx < s_q_idx) {
1454			q_idx++;
1455			continue;
1456		}
1457		if (!tc_qdisc_dump_ignore(q) &&
1458		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1459				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1460			goto done;
1461		q_idx++;
1462	}
1463
1464out:
1465	*q_idx_p = q_idx;
1466	return ret;
1467done:
1468	ret = -1;
1469	goto out;
1470}
1471
1472static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1473{
1474	struct net *net = sock_net(skb->sk);
1475	int idx, q_idx;
1476	int s_idx, s_q_idx;
1477	struct net_device *dev;
1478
1479	s_idx = cb->args[0];
1480	s_q_idx = q_idx = cb->args[1];
1481
1482	idx = 0;
1483	ASSERT_RTNL();
1484	for_each_netdev(net, dev) {
1485		struct netdev_queue *dev_queue;
1486
1487		if (idx < s_idx)
1488			goto cont;
1489		if (idx > s_idx)
1490			s_q_idx = 0;
1491		q_idx = 0;
1492
1493		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1494			goto done;
1495
1496		dev_queue = dev_ingress_queue(dev);
1497		if (dev_queue &&
1498		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1499				       &q_idx, s_q_idx) < 0)
1500			goto done;
1501
1502cont:
1503		idx++;
1504	}
1505
1506done:
1507	cb->args[0] = idx;
1508	cb->args[1] = q_idx;
1509
1510	return skb->len;
1511}
1512
1513
1514
1515/************************************************
1516 *	Traffic classes manipulation.		*
1517 ************************************************/
1518
1519
1520
1521static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1522{
1523	struct net *net = sock_net(skb->sk);
1524	struct tcmsg *tcm = nlmsg_data(n);
1525	struct nlattr *tca[TCA_MAX + 1];
1526	struct net_device *dev;
1527	struct Qdisc *q = NULL;
1528	const struct Qdisc_class_ops *cops;
1529	unsigned long cl = 0;
1530	unsigned long new_cl;
1531	u32 portid;
1532	u32 clid;
1533	u32 qid;
1534	int err;
1535
1536	if ((n->nlmsg_type != RTM_GETTCLASS) &&
1537	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1538		return -EPERM;
1539
1540	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1541	if (err < 0)
1542		return err;
1543
1544	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1545	if (!dev)
1546		return -ENODEV;
1547
1548	/*
1549	   parent == TC_H_UNSPEC - unspecified parent.
1550	   parent == TC_H_ROOT   - class is root, which has no parent.
1551	   parent == X:0	 - parent is root class.
1552	   parent == X:Y	 - parent is a node in hierarchy.
1553	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
1554
1555	   handle == 0:0	 - generate handle from kernel pool.
1556	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
1557	   handle == X:Y	 - class is X:Y.
1558	   handle == X:0	 - root class.
1559	 */
1560
1561	/* Step 1. Determine qdisc handle X:0 */
1562
1563	portid = tcm->tcm_parent;
1564	clid = tcm->tcm_handle;
1565	qid = TC_H_MAJ(clid);
1566
1567	if (portid != TC_H_ROOT) {
1568		u32 qid1 = TC_H_MAJ(portid);
1569
1570		if (qid && qid1) {
1571			/* If both majors are known, they must be identical. */
1572			if (qid != qid1)
1573				return -EINVAL;
1574		} else if (qid1) {
1575			qid = qid1;
1576		} else if (qid == 0)
1577			qid = dev->qdisc->handle;
1578
1579		/* Now qid is a genuine qdisc handle, consistent
1580		 * with both parent and child.
1581		 *
1582		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
1583		 */
1584		if (portid)
1585			portid = TC_H_MAKE(qid, portid);
1586	} else {
1587		if (qid == 0)
1588			qid = dev->qdisc->handle;
1589	}
1590
1591	/* OK. Locate qdisc */
1592	q = qdisc_lookup(dev, qid);
1593	if (!q)
1594		return -ENOENT;
1595
1596	/* And check that it supports classes */
1597	cops = q->ops->cl_ops;
1598	if (cops == NULL)
1599		return -EINVAL;
1600
1601	/* Now try to get class */
1602	if (clid == 0) {
1603		if (portid == TC_H_ROOT)
1604			clid = qid;
1605	} else
1606		clid = TC_H_MAKE(qid, clid);
1607
1608	if (clid)
1609		cl = cops->get(q, clid);
1610
1611	if (cl == 0) {
1612		err = -ENOENT;
1613		if (n->nlmsg_type != RTM_NEWTCLASS ||
1614		    !(n->nlmsg_flags & NLM_F_CREATE))
1615			goto out;
1616	} else {
1617		switch (n->nlmsg_type) {
1618		case RTM_NEWTCLASS:
1619			err = -EEXIST;
1620			if (n->nlmsg_flags & NLM_F_EXCL)
1621				goto out;
1622			break;
1623		case RTM_DELTCLASS:
1624			err = -EOPNOTSUPP;
1625			if (cops->delete)
1626				err = cops->delete(q, cl);
1627			if (err == 0)
1628				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1629			goto out;
1630		case RTM_GETTCLASS:
1631			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1632			goto out;
1633		default:
1634			err = -EINVAL;
1635			goto out;
1636		}
1637	}
1638
1639	new_cl = cl;
1640	err = -EOPNOTSUPP;
1641	if (cops->change)
1642		err = cops->change(q, clid, portid, tca, &new_cl);
1643	if (err == 0)
1644		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1645
1646out:
1647	if (cl)
1648		cops->put(q, cl);
1649
1650	return err;
1651}
1652
1653
1654static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1655			  unsigned long cl,
1656			  u32 portid, u32 seq, u16 flags, int event)
1657{
1658	struct tcmsg *tcm;
1659	struct nlmsghdr  *nlh;
1660	unsigned char *b = skb_tail_pointer(skb);
1661	struct gnet_dump d;
1662	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1663
1664	cond_resched();
1665	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1666	if (!nlh)
1667		goto out_nlmsg_trim;
1668	tcm = nlmsg_data(nlh);
1669	tcm->tcm_family = AF_UNSPEC;
1670	tcm->tcm__pad1 = 0;
1671	tcm->tcm__pad2 = 0;
1672	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1673	tcm->tcm_parent = q->handle;
1674	tcm->tcm_handle = q->handle;
1675	tcm->tcm_info = 0;
1676	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1677		goto nla_put_failure;
1678	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1679		goto nla_put_failure;
1680
1681	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1682					 qdisc_root_sleeping_lock(q), &d) < 0)
1683		goto nla_put_failure;
1684
1685	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1686		goto nla_put_failure;
1687
1688	if (gnet_stats_finish_copy(&d) < 0)
1689		goto nla_put_failure;
1690
1691	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1692	return skb->len;
1693
1694out_nlmsg_trim:
1695nla_put_failure:
1696	nlmsg_trim(skb, b);
1697	return -1;
1698}
1699
1700static int tclass_notify(struct net *net, struct sk_buff *oskb,
1701			 struct nlmsghdr *n, struct Qdisc *q,
1702			 unsigned long cl, int event)
1703{
1704	struct sk_buff *skb;
1705	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1706
1707	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1708	if (!skb)
1709		return -ENOBUFS;
1710
1711	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1712		kfree_skb(skb);
1713		return -EINVAL;
1714	}
1715
1716	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1717			      n->nlmsg_flags & NLM_F_ECHO);
1718}
1719
1720struct qdisc_dump_args {
1721	struct qdisc_walker	w;
1722	struct sk_buff		*skb;
1723	struct netlink_callback	*cb;
1724};
1725
1726static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1727{
1728	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1729
1730	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1731			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1732}
1733
1734static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1735				struct tcmsg *tcm, struct netlink_callback *cb,
1736				int *t_p, int s_t)
1737{
1738	struct qdisc_dump_args arg;
1739
1740	if (tc_qdisc_dump_ignore(q) ||
1741	    *t_p < s_t || !q->ops->cl_ops ||
1742	    (tcm->tcm_parent &&
1743	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1744		(*t_p)++;
1745		return 0;
1746	}
1747	if (*t_p > s_t)
1748		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1749	arg.w.fn = qdisc_class_dump;
1750	arg.skb = skb;
1751	arg.cb = cb;
1752	arg.w.stop  = 0;
1753	arg.w.skip = cb->args[1];
1754	arg.w.count = 0;
1755	q->ops->cl_ops->walk(q, &arg.w);
1756	cb->args[1] = arg.w.count;
1757	if (arg.w.stop)
1758		return -1;
1759	(*t_p)++;
1760	return 0;
1761}
1762
1763static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1764			       struct tcmsg *tcm, struct netlink_callback *cb,
1765			       int *t_p, int s_t)
1766{
1767	struct Qdisc *q;
1768
1769	if (!root)
1770		return 0;
1771
1772	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1773		return -1;
1774
1775	list_for_each_entry(q, &root->list, list) {
1776		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1777			return -1;
1778	}
1779
1780	return 0;
1781}
1782
1783static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1784{
1785	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1786	struct net *net = sock_net(skb->sk);
1787	struct netdev_queue *dev_queue;
1788	struct net_device *dev;
1789	int t, s_t;
1790
1791	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1792		return 0;
1793	dev = dev_get_by_index(net, tcm->tcm_ifindex);
1794	if (!dev)
1795		return 0;
1796
1797	s_t = cb->args[0];
1798	t = 0;
1799
1800	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1801		goto done;
1802
1803	dev_queue = dev_ingress_queue(dev);
1804	if (dev_queue &&
1805	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1806				&t, s_t) < 0)
1807		goto done;
1808
1809done:
1810	cb->args[0] = t;
1811
1812	dev_put(dev);
1813	return skb->len;
1814}
1815
1816/* Main classifier routine: scans the classifier chain attached
1817 * to this qdisc, (optionally) tests for the protocol and asks the
1818 * specific classifiers to classify the packet.
1819 */
1820int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1821		struct tcf_result *res, bool compat_mode)
1822{
1823	__be16 protocol = tc_skb_protocol(skb);
1824#ifdef CONFIG_NET_CLS_ACT
1825	const struct tcf_proto *old_tp = tp;
1826	int limit = 0;
1827
1828reclassify:
1829#endif
1830	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1831		int err;
1832
1833		if (tp->protocol != protocol &&
1834		    tp->protocol != htons(ETH_P_ALL))
1835			continue;
1836
1837		err = tp->classify(skb, tp, res);
1838#ifdef CONFIG_NET_CLS_ACT
1839		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
1840			goto reset;
1841#endif
1842		if (err >= 0)
1843			return err;
1844	}
1845
1846	return TC_ACT_UNSPEC; /* signal: continue lookup */
1847#ifdef CONFIG_NET_CLS_ACT
1848reset:
1849	if (unlikely(limit++ >= MAX_REC_LOOP)) {
1850		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
1851				       tp->q->ops->id, tp->prio & 0xffff,
1852				       ntohs(tp->protocol));
1853		return TC_ACT_SHOT;
1854	}
1855
1856	tp = old_tp;
1857	protocol = tc_skb_protocol(skb);
1858	goto reclassify;
1859#endif
1860}
1861EXPORT_SYMBOL(tc_classify);
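    /* Typical caller (sketch): a classful qdisc runs its filter chain
     * through tc_classify() and maps the verdict to a class.
     * q->filter_list and example_find_class() are hypothetical:
     *
     *	struct tcf_result res;
     *	int result = tc_classify(skb, rcu_dereference_bh(q->filter_list),
     *				 &res, false);
     *
     *	switch (result) {
     *	case TC_ACT_SHOT:
     *	case TC_ACT_STOLEN:
     *	case TC_ACT_QUEUED:
     *		return NULL;		// dropped or consumed by an action
     *	default:
     *		return example_find_class(sch, res.classid);
     *	}
     */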
1862
1863bool tcf_destroy(struct tcf_proto *tp, bool force)
1864{
1865	if (tp->ops->destroy(tp, force)) {
1866		module_put(tp->ops->owner);
1867		kfree_rcu(tp, rcu);
1868		return true;
1869	}
1870
1871	return false;
1872}
1873
1874void tcf_destroy_chain(struct tcf_proto __rcu **fl)
1875{
1876	struct tcf_proto *tp;
1877
1878	while ((tp = rtnl_dereference(*fl)) != NULL) {
1879		RCU_INIT_POINTER(*fl, tp->next);
1880		tcf_destroy(tp, true);
1881	}
1882}
1883EXPORT_SYMBOL(tcf_destroy_chain);
1884
1885#ifdef CONFIG_PROC_FS
1886static int psched_show(struct seq_file *seq, void *v)
1887{
1888	seq_printf(seq, "%08x %08x %08x %08x\n",
1889		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1890		   1000000,
1891		   (u32)NSEC_PER_SEC / hrtimer_resolution);
1892
1893	return 0;
1894}
1895
1896static int psched_open(struct inode *inode, struct file *file)
1897{
1898	return single_open(file, psched_show, NULL);
1899}
1900
1901static const struct file_operations psched_fops = {
1902	.owner = THIS_MODULE,
1903	.open = psched_open,
1904	.read  = seq_read,
1905	.llseek = seq_lseek,
1906	.release = single_release,
1907};
1908
1909static int __net_init psched_net_init(struct net *net)
1910{
1911	struct proc_dir_entry *e;
1912
1913	e = proc_create("psched", 0, net->proc_net, &psched_fops);
1914	if (e == NULL)
1915		return -ENOMEM;
1916
1917	return 0;
1918}
1919
1920static void __net_exit psched_net_exit(struct net *net)
1921{
1922	remove_proc_entry("psched", net->proc_net);
1923}
1924#else
1925static int __net_init psched_net_init(struct net *net)
1926{
1927	return 0;
1928}
1929
1930static void __net_exit psched_net_exit(struct net *net)
1931{
1932}
1933#endif
1934
1935static struct pernet_operations psched_net_ops = {
1936	.init = psched_net_init,
1937	.exit = psched_net_exit,
1938};
1939
1940static int __init pktsched_init(void)
1941{
1942	int err;
1943
1944	err = register_pernet_subsys(&psched_net_ops);
1945	if (err) {
1946		pr_err("pktsched_init: "
1947		       "cannot initialize per netns operations\n");
1948		return err;
1949	}
1950
1951	register_qdisc(&pfifo_fast_ops);
1952	register_qdisc(&pfifo_qdisc_ops);
1953	register_qdisc(&bfifo_qdisc_ops);
1954	register_qdisc(&pfifo_head_drop_qdisc_ops);
1955	register_qdisc(&mq_qdisc_ops);
1956	register_qdisc(&noqueue_qdisc_ops);
1957
1958	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1959	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1960	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1961	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1962	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1963	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1964
1965	return 0;
1966}
1967
1968subsys_initcall(pktsched_init);