v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_generic.c	Generic packet scheduler routines.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
   7 *              - Ingress support
   8 */
   9
  10#include <linux/bitops.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/string.h>
  16#include <linux/errno.h>
  17#include <linux/netdevice.h>
  18#include <linux/skbuff.h>
  19#include <linux/rtnetlink.h>
  20#include <linux/init.h>
  21#include <linux/rcupdate.h>
  22#include <linux/list.h>
  23#include <linux/slab.h>
  24#include <linux/if_vlan.h>
  25#include <linux/skb_array.h>
  26#include <linux/if_macvlan.h>
  27#include <net/sch_generic.h>
  28#include <net/pkt_sched.h>
  29#include <net/dst.h>
  30#include <trace/events/qdisc.h>
  31#include <trace/events/net.h>
  32#include <net/xfrm.h>
  33
  34/* Qdisc to use by default */
  35const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
  36EXPORT_SYMBOL(default_qdisc_ops);
  37
  38/* Main transmission queue. */
  39
  40/* Modifications to data participating in scheduling must be protected with
  41 * qdisc_lock(qdisc) spinlock.
  42 *
  43 * The idea is the following:
  44 * - enqueue, dequeue are serialized via qdisc root lock
  45 * - ingress filtering is also serialized via qdisc root lock
  46 * - updates to tree and tree walking are only done under the rtnl mutex.
  47 */
  48
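/* Sentinel returned by __skb_dequeue_bad_txq() when the target txq is
 * frozen or stopped: dequeue_skb() checks for it and bails out instead
 * of falling through to a regular ->dequeue(). The value 1 can never
 * alias a real skb pointer.
 */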
  49#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
  50
  51static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
  52{
  53	const struct netdev_queue *txq = q->dev_queue;
  54	spinlock_t *lock = NULL;
  55	struct sk_buff *skb;
  56
  57	if (q->flags & TCQ_F_NOLOCK) {
  58		lock = qdisc_lock(q);
  59		spin_lock(lock);
  60	}
  61
  62	skb = skb_peek(&q->skb_bad_txq);
  63	if (skb) {
  64		/* check the reason of requeuing without tx lock first */
  65		txq = skb_get_tx_queue(txq->dev, skb);
  66		if (!netif_xmit_frozen_or_stopped(txq)) {
  67			skb = __skb_dequeue(&q->skb_bad_txq);
  68			if (qdisc_is_percpu_stats(q)) {
  69				qdisc_qstats_cpu_backlog_dec(q, skb);
  70				qdisc_qstats_cpu_qlen_dec(q);
  71			} else {
  72				qdisc_qstats_backlog_dec(q, skb);
  73				q->q.qlen--;
  74			}
  75		} else {
  76			skb = SKB_XOFF_MAGIC;
  77		}
  78	}
  79
  80	if (lock)
  81		spin_unlock(lock);
  82
  83	return skb;
  84}
  85
  86static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
  87{
  88	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
  89
  90	if (unlikely(skb))
  91		skb = __skb_dequeue_bad_txq(q);
  92
  93	return skb;
  94}
  95
  96static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
  97					     struct sk_buff *skb)
  98{
  99	spinlock_t *lock = NULL;
 100
 101	if (q->flags & TCQ_F_NOLOCK) {
 102		lock = qdisc_lock(q);
 103		spin_lock(lock);
 104	}
 105
 106	__skb_queue_tail(&q->skb_bad_txq, skb);
 107
 108	if (qdisc_is_percpu_stats(q)) {
 109		qdisc_qstats_cpu_backlog_inc(q, skb);
 110		qdisc_qstats_cpu_qlen_inc(q);
 111	} else {
 112		qdisc_qstats_backlog_inc(q, skb);
 113		q->q.qlen++;
 114	}
 115
 116	if (lock)
 117		spin_unlock(lock);
 118}
 119
 120static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 121{
 122	spinlock_t *lock = NULL;
 123
 124	if (q->flags & TCQ_F_NOLOCK) {
 125		lock = qdisc_lock(q);
 126		spin_lock(lock);
 127	}
 128
 129	while (skb) {
 130		struct sk_buff *next = skb->next;
 131
 132		__skb_queue_tail(&q->gso_skb, skb);
 133
 134		/* it's still part of the queue */
 135		if (qdisc_is_percpu_stats(q)) {
 136			qdisc_qstats_cpu_requeues_inc(q);
 137			qdisc_qstats_cpu_backlog_inc(q, skb);
 138			qdisc_qstats_cpu_qlen_inc(q);
 139		} else {
 140			q->qstats.requeues++;
 141			qdisc_qstats_backlog_inc(q, skb);
 142			q->q.qlen++;
 143		}
 144
 145		skb = next;
 146	}
 147	if (lock)
 148		spin_unlock(lock);
 149	__netif_schedule(q);
 150}
 151
 152static void try_bulk_dequeue_skb(struct Qdisc *q,
 153				 struct sk_buff *skb,
 154				 const struct netdev_queue *txq,
 155				 int *packets)
 156{
 157	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 158
 159	while (bytelimit > 0) {
 160		struct sk_buff *nskb = q->dequeue(q);
 161
 162		if (!nskb)
 163			break;
 164
 165		bytelimit -= nskb->len; /* covers GSO len */
 166		skb->next = nskb;
 167		skb = nskb;
 168		(*packets)++; /* GSO counts as one pkt */
 169	}
 170	skb_mark_not_on_list(skb);
 171}
 172
 173/* This variant of try_bulk_dequeue_skb() makes sure
 174 * all skbs in the chain are for the same txq
 175 */
 176static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 177				      struct sk_buff *skb,
 178				      int *packets)
 179{
 180	int mapping = skb_get_queue_mapping(skb);
 181	struct sk_buff *nskb;
 182	int cnt = 0;
 183
 184	do {
 185		nskb = q->dequeue(q);
 186		if (!nskb)
 187			break;
 188		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 189			qdisc_enqueue_skb_bad_txq(q, nskb);
 190			break;
 191		}
 192		skb->next = nskb;
 193		skb = nskb;
 194	} while (++cnt < 8);
 195	(*packets) += cnt;
 196	skb_mark_not_on_list(skb);
 197}
 198
 199/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 200 * A requeued skb (via q->gso_skb) can also be a SKB list.
 201 */
 202static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 203				   int *packets)
 204{
 205	const struct netdev_queue *txq = q->dev_queue;
 206	struct sk_buff *skb = NULL;
 207
 208	*packets = 1;
 209	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
 210		spinlock_t *lock = NULL;
 211
 212		if (q->flags & TCQ_F_NOLOCK) {
 213			lock = qdisc_lock(q);
 214			spin_lock(lock);
 215		}
 216
 217		skb = skb_peek(&q->gso_skb);
 218
 219		/* skb may be null if another cpu pulls gso_skb off in between
 220		 * empty check and lock.
 221		 */
 222		if (!skb) {
 223			if (lock)
 224				spin_unlock(lock);
 225			goto validate;
 226		}
 227
 228		/* skb in gso_skb were already validated */
 229		*validate = false;
 230		if (xfrm_offload(skb))
 231			*validate = true;
 232		/* check the reason of requeuing without tx lock first */
 233		txq = skb_get_tx_queue(txq->dev, skb);
 234		if (!netif_xmit_frozen_or_stopped(txq)) {
 235			skb = __skb_dequeue(&q->gso_skb);
 236			if (qdisc_is_percpu_stats(q)) {
 237				qdisc_qstats_cpu_backlog_dec(q, skb);
 238				qdisc_qstats_cpu_qlen_dec(q);
 239			} else {
 240				qdisc_qstats_backlog_dec(q, skb);
 241				q->q.qlen--;
 242			}
 243		} else {
 244			skb = NULL;
 245		}
 246		if (lock)
 247			spin_unlock(lock);
 248		goto trace;
 249	}
 250validate:
 251	*validate = true;
 252
 253	if ((q->flags & TCQ_F_ONETXQUEUE) &&
 254	    netif_xmit_frozen_or_stopped(txq))
 255		return skb;
 256
 257	skb = qdisc_dequeue_skb_bad_txq(q);
 258	if (unlikely(skb)) {
 259		if (skb == SKB_XOFF_MAGIC)
 260			return NULL;
 261		goto bulk;
 262	}
 263	skb = q->dequeue(q);
 264	if (skb) {
 265bulk:
 266		if (qdisc_may_bulk(q))
 267			try_bulk_dequeue_skb(q, skb, txq, packets);
 268		else
 269			try_bulk_dequeue_skb_slow(q, skb, packets);
 270	}
 271trace:
 272	trace_qdisc_dequeue(q, txq, *packets, skb);
 273	return skb;
 274}
 275
 276/*
 277 * Transmit possibly several skbs, and handle the return status as
 278 * required. Owning running seqcount bit guarantees that
 279 * only one CPU can execute this function.
 280 *
 281 * Returns to the caller:
 282 *				false  - hardware queue frozen backoff
 283 *				true   - feel free to send more pkts
 284 */
 285bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 286		     struct net_device *dev, struct netdev_queue *txq,
 287		     spinlock_t *root_lock, bool validate)
 288{
 289	int ret = NETDEV_TX_BUSY;
 290	bool again = false;
 291
 292	/* And release qdisc */
 293	if (root_lock)
 294		spin_unlock(root_lock);
 295
 296	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 297	if (validate)
 298		skb = validate_xmit_skb_list(skb, dev, &again);
 299
 300#ifdef CONFIG_XFRM_OFFLOAD
 301	if (unlikely(again)) {
 302		if (root_lock)
 303			spin_lock(root_lock);
 304
 305		dev_requeue_skb(skb, q);
 306		return false;
 307	}
 308#endif
 309
 310	if (likely(skb)) {
 311		HARD_TX_LOCK(dev, txq, smp_processor_id());
 312		if (!netif_xmit_frozen_or_stopped(txq))
 313			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 314
 315		HARD_TX_UNLOCK(dev, txq);
 316	} else {
 317		if (root_lock)
 318			spin_lock(root_lock);
 319		return true;
 320	}
 321
 322	if (root_lock)
 323		spin_lock(root_lock);
 324
 325	if (!dev_xmit_complete(ret)) {
 326		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 327		if (unlikely(ret != NETDEV_TX_BUSY))
 328			net_warn_ratelimited("BUG %s code %d qlen %d\n",
 329					     dev->name, ret, q->q.qlen);
 330
 331		dev_requeue_skb(skb, q);
 332		return false;
 333	}
 334
 335	return true;
 336}
 337
 338/*
 339 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 340 *
 341 * running seqcount guarantees only one CPU can process
 342 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 343 * this queue.
 344 *
 345 *  netif_tx_lock serializes accesses to device driver.
 346 *
 347 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 348 *  if one is grabbed, another must be free.
 349 *
 350 * Note, that this procedure can be called by a watchdog timer
 351 *
 352 * Returns to the caller:
 353 *				0  - queue is empty or throttled.
 354 *				>0 - queue is not empty.
 355 *
 356 */
 357static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 358{
 359	spinlock_t *root_lock = NULL;
 360	struct netdev_queue *txq;
 361	struct net_device *dev;
 362	struct sk_buff *skb;
 363	bool validate;
 364
 365	/* Dequeue packet */
 366	skb = dequeue_skb(q, &validate, packets);
 367	if (unlikely(!skb))
 368		return false;
 369
 370	if (!(q->flags & TCQ_F_NOLOCK))
 371		root_lock = qdisc_lock(q);
 372
 373	dev = qdisc_dev(q);
 374	txq = skb_get_tx_queue(dev, skb);
 375
 376	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
 377}
 378
 379void __qdisc_run(struct Qdisc *q)
 380{
 381	int quota = dev_tx_weight;
 382	int packets;
 383
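	/* dev_tx_weight currently defaults to 64 packets; it bounds how
	 * much work a single __qdisc_run() invocation may do before the
	 * qdisc is rescheduled via __netif_schedule() below.
	 */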
 384	while (qdisc_restart(q, &packets)) {
 385		/*
 386		 * Ordered by possible occurrence: Postpone processing if
 387		 * 1. we've exceeded packet quota
 388		 * 2. another process needs the CPU;
 389		 */
 390		quota -= packets;
 391		if (quota <= 0 || need_resched()) {
 392			__netif_schedule(q);
 393			break;
 394		}
 395	}
 396}
 397
 398unsigned long dev_trans_start(struct net_device *dev)
 399{
 400	unsigned long val, res;
 401	unsigned int i;
 402
 403	if (is_vlan_dev(dev))
 404		dev = vlan_dev_real_dev(dev);
 405	else if (netif_is_macvlan(dev))
 406		dev = macvlan_dev_real_dev(dev);
 407	res = netdev_get_tx_queue(dev, 0)->trans_start;
 408	for (i = 1; i < dev->num_tx_queues; i++) {
 409		val = netdev_get_tx_queue(dev, i)->trans_start;
 410		if (val && time_after(val, res))
 411			res = val;
 412	}
 413
 414	return res;
 415}
 416EXPORT_SYMBOL(dev_trans_start);
 417
 418static void dev_watchdog(struct timer_list *t)
 419{
 420	struct net_device *dev = from_timer(dev, t, watchdog_timer);
 421
 422	netif_tx_lock(dev);
 423	if (!qdisc_tx_is_noop(dev)) {
 424		if (netif_device_present(dev) &&
 425		    netif_running(dev) &&
 426		    netif_carrier_ok(dev)) {
 427			int some_queue_timedout = 0;
 428			unsigned int i;
 429			unsigned long trans_start;
 430
 431			for (i = 0; i < dev->num_tx_queues; i++) {
 432				struct netdev_queue *txq;
 433
 434				txq = netdev_get_tx_queue(dev, i);
 435				trans_start = txq->trans_start;
 436				if (netif_xmit_stopped(txq) &&
 437				    time_after(jiffies, (trans_start +
 438							 dev->watchdog_timeo))) {
 439					some_queue_timedout = 1;
 440					txq->trans_timeout++;
 441					break;
 442				}
 443			}
 444
 445			if (some_queue_timedout) {
 446				trace_net_dev_xmit_timeout(dev, i);
 447				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
 448				       dev->name, netdev_drivername(dev), i);
 449				dev->netdev_ops->ndo_tx_timeout(dev);
 450			}
 451			if (!mod_timer(&dev->watchdog_timer,
 452				       round_jiffies(jiffies +
 453						     dev->watchdog_timeo)))
 454				dev_hold(dev);
 455		}
 456	}
 457	netif_tx_unlock(dev);
 458
 459	dev_put(dev);
 460}
 461
 462void __netdev_watchdog_up(struct net_device *dev)
 463{
 464	if (dev->netdev_ops->ndo_tx_timeout) {
 465		if (dev->watchdog_timeo <= 0)
 466			dev->watchdog_timeo = 5*HZ;
 467		if (!mod_timer(&dev->watchdog_timer,
 468			       round_jiffies(jiffies + dev->watchdog_timeo)))
 469			dev_hold(dev);
 470	}
 471}
 472
 473static void dev_watchdog_up(struct net_device *dev)
 474{
 475	__netdev_watchdog_up(dev);
 476}
 477
 478static void dev_watchdog_down(struct net_device *dev)
 479{
 480	netif_tx_lock_bh(dev);
 481	if (del_timer(&dev->watchdog_timer))
 482		dev_put(dev);
 483	netif_tx_unlock_bh(dev);
 484}
 485
 486/**
 487 *	netif_carrier_on - set carrier
 488 *	@dev: network device
 489 *
 490 * Device has detected acquisition of carrier.
 491 */
 492void netif_carrier_on(struct net_device *dev)
 493{
 494	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 495		if (dev->reg_state == NETREG_UNINITIALIZED)
 496			return;
 497		atomic_inc(&dev->carrier_up_count);
 498		linkwatch_fire_event(dev);
 499		if (netif_running(dev))
 500			__netdev_watchdog_up(dev);
 501	}
 502}
 503EXPORT_SYMBOL(netif_carrier_on);
 504
 505/**
 506 *	netif_carrier_off - clear carrier
 507 *	@dev: network device
 508 *
 509 * Device has detected loss of carrier.
 510 */
 511void netif_carrier_off(struct net_device *dev)
 512{
 513	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 514		if (dev->reg_state == NETREG_UNINITIALIZED)
 515			return;
 516		atomic_inc(&dev->carrier_down_count);
 517		linkwatch_fire_event(dev);
 518	}
 519}
 520EXPORT_SYMBOL(netif_carrier_off);
 521
 522/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 523   under all circumstances. It is difficult to invent anything faster or
 524   cheaper.
 525 */
 526
 527static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 528			struct sk_buff **to_free)
 529{
 530	__qdisc_drop(skb, to_free);
 531	return NET_XMIT_CN;
 532}
 533
 534static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
 535{
 536	return NULL;
 537}
 538
 539struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 540	.id		=	"noop",
 541	.priv_size	=	0,
 542	.enqueue	=	noop_enqueue,
 543	.dequeue	=	noop_dequeue,
 544	.peek		=	noop_dequeue,
 545	.owner		=	THIS_MODULE,
 546};
 547
 548static struct netdev_queue noop_netdev_queue = {
 549	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 550	.qdisc_sleeping	=	&noop_qdisc,
 551};
 552
 553struct Qdisc noop_qdisc = {
 554	.enqueue	=	noop_enqueue,
 555	.dequeue	=	noop_dequeue,
 556	.flags		=	TCQ_F_BUILTIN,
 557	.ops		=	&noop_qdisc_ops,
 558	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 559	.dev_queue	=	&noop_netdev_queue,
 560	.running	=	SEQCNT_ZERO(noop_qdisc.running),
 561	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 562	.gso_skb = {
 563		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
 564		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
 565		.qlen = 0,
 566		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
 567	},
 568	.skb_bad_txq = {
 569		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 570		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 571		.qlen = 0,
 572		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 573	},
 574};
 575EXPORT_SYMBOL(noop_qdisc);
 576
 577static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
 578			struct netlink_ext_ack *extack)
 579{
 580	/* register_qdisc() assigns a default of noop_enqueue if unset,
 581	 * but __dev_queue_xmit() treats noqueue only as such
 582	 * if this is NULL - so clear it here. */
 583	qdisc->enqueue = NULL;
 584	return 0;
 585}
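/* With ->enqueue left NULL, __dev_queue_xmit() bypasses the qdisc layer
 * entirely and hands skbs straight to the driver, which is the whole
 * point of noqueue on virtual (IFF_NO_QUEUE) devices.
 */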
 586
 587struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 588	.id		=	"noqueue",
 589	.priv_size	=	0,
 590	.init		=	noqueue_init,
 591	.enqueue	=	noop_enqueue,
 592	.dequeue	=	noop_dequeue,
 593	.peek		=	noop_dequeue,
 594	.owner		=	THIS_MODULE,
 595};
 596
 597static const u8 prio2band[TC_PRIO_MAX + 1] = {
 598	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
 599};
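/* Illustration: skb->priority & TC_PRIO_MAX indexes this map, so e.g.
 * TC_PRIO_BESTEFFORT (0) lands in band 1, TC_PRIO_BULK (2) in band 2,
 * and TC_PRIO_INTERACTIVE (6) / TC_PRIO_CONTROL (7) in band 0, which
 * pfifo_fast_dequeue() below drains first.
 */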
 600
 601/* 3-band FIFO queue: old style, but should be a bit faster than
 602   generic prio+fifo combination.
 603 */
 604
 605#define PFIFO_FAST_BANDS 3
 606
 607/*
 608 * Private data for a pfifo_fast scheduler containing:
 609 *	- rings for priority bands
 610 */
 611struct pfifo_fast_priv {
 612	struct skb_array q[PFIFO_FAST_BANDS];
 613};
 614
 615static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
 616					  int band)
 617{
 618	return &priv->q[band];
 619}
 620
 621static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 622			      struct sk_buff **to_free)
 623{
 624	int band = prio2band[skb->priority & TC_PRIO_MAX];
 625	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 626	struct skb_array *q = band2list(priv, band);
 627	unsigned int pkt_len = qdisc_pkt_len(skb);
 628	int err;
 629
 630	err = skb_array_produce(q, skb);
 631
 632	if (unlikely(err)) {
 633		if (qdisc_is_percpu_stats(qdisc))
 634			return qdisc_drop_cpu(skb, qdisc, to_free);
 635		else
 636			return qdisc_drop(skb, qdisc, to_free);
 637	}
 638
 639	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 640	return NET_XMIT_SUCCESS;
 641}
 642
 643static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 644{
 645	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 646	struct sk_buff *skb = NULL;
 647	int band;
 648
 649	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 650		struct skb_array *q = band2list(priv, band);
 651
 652		if (__skb_array_empty(q))
 653			continue;
 654
 655		skb = __skb_array_consume(q);
 656	}
 657	if (likely(skb)) {
 658		qdisc_update_stats_at_dequeue(qdisc, skb);
 659	} else {
 660		qdisc->empty = true;
 661	}
 662
 663	return skb;
 664}
 665
 666static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 667{
 668	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 669	struct sk_buff *skb = NULL;
 670	int band;
 671
 672	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 673		struct skb_array *q = band2list(priv, band);
 674
 675		skb = __skb_array_peek(q);
 676	}
 677
 678	return skb;
 679}
 680
 681static void pfifo_fast_reset(struct Qdisc *qdisc)
 682{
 683	int i, band;
 684	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 685
 686	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
 687		struct skb_array *q = band2list(priv, band);
 688		struct sk_buff *skb;
 689
 690		/* NULL ring is possible if destroy path is due to a failed
 691		 * skb_array_init() in pfifo_fast_init() case.
 692		 */
 693		if (!q->ring.queue)
 694			continue;
 695
 696		while ((skb = __skb_array_consume(q)) != NULL)
 697			kfree_skb(skb);
 698	}
 699
 700	if (qdisc_is_percpu_stats(qdisc)) {
 701		for_each_possible_cpu(i) {
 702			struct gnet_stats_queue *q;
 703
 704			q = per_cpu_ptr(qdisc->cpu_qstats, i);
 705			q->backlog = 0;
 706			q->qlen = 0;
 707		}
 708	}
 709}
 710
 711static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 712{
 713	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 714
 715	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
 716	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 717		goto nla_put_failure;
 718	return skb->len;
 719
 720nla_put_failure:
 721	return -1;
 722}
 723
 724static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
 725			   struct netlink_ext_ack *extack)
 726{
 727	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 728	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 729	int prio;
 730
 731	/* guard against zero length rings */
 732	if (!qlen)
 733		return -EINVAL;
 734
 735	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 736		struct skb_array *q = band2list(priv, prio);
 737		int err;
 738
 739		err = skb_array_init(q, qlen, GFP_KERNEL);
 740		if (err)
 741			return -ENOMEM;
 742	}
 743
 744	/* Can by-pass the queue discipline */
 745	qdisc->flags |= TCQ_F_CAN_BYPASS;
 746	return 0;
 747}
 748
 749static void pfifo_fast_destroy(struct Qdisc *sch)
 750{
 751	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 752	int prio;
 753
 754	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 755		struct skb_array *q = band2list(priv, prio);
 756
 757		/* NULL ring is possible if destroy path is due to a failed
 758		 * skb_array_init() in pfifo_fast_init() case.
 759		 */
 760		if (!q->ring.queue)
 761			continue;
 762		/* Destroy ring but no need to kfree_skb because a call to
 763		 * pfifo_fast_reset() has already done that work.
 764		 */
 765		ptr_ring_cleanup(&q->ring, NULL);
 766	}
 767}
 768
 769static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
 770					  unsigned int new_len)
 771{
 772	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 773	struct skb_array *bands[PFIFO_FAST_BANDS];
 774	int prio;
 775
 776	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 777		struct skb_array *q = band2list(priv, prio);
 778
 779		bands[prio] = q;
 780	}
 781
 782	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
 783					 GFP_KERNEL);
 784}
 785
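/* pfifo_fast is what default_qdisc_ops at the top of this file points
 * at, i.e. the qdisc used for tx queues when nothing else is
 * configured. static_flags makes every instance lockless
 * (TCQ_F_NOLOCK) with per-CPU queue statistics (TCQ_F_CPUSTATS).
 */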
 786struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 787	.id		=	"pfifo_fast",
 788	.priv_size	=	sizeof(struct pfifo_fast_priv),
 789	.enqueue	=	pfifo_fast_enqueue,
 790	.dequeue	=	pfifo_fast_dequeue,
 791	.peek		=	pfifo_fast_peek,
 792	.init		=	pfifo_fast_init,
 793	.destroy	=	pfifo_fast_destroy,
 794	.reset		=	pfifo_fast_reset,
 795	.dump		=	pfifo_fast_dump,
 796	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
 797	.owner		=	THIS_MODULE,
 798	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 799};
 800EXPORT_SYMBOL(pfifo_fast_ops);
 801
 802struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 803			  const struct Qdisc_ops *ops,
 804			  struct netlink_ext_ack *extack)
 805{
 806	void *p;
 807	struct Qdisc *sch;
 808	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 809	int err = -ENOBUFS;
 810	struct net_device *dev;
 811
 812	if (!dev_queue) {
 813		NL_SET_ERR_MSG(extack, "No device queue given");
 814		err = -EINVAL;
 815		goto errout;
 816	}
 817
 818	dev = dev_queue->dev;
 819	p = kzalloc_node(size, GFP_KERNEL,
 820			 netdev_queue_numa_node_read(dev_queue));
 821
 822	if (!p)
 823		goto errout;
 824	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 825	/* if we got non aligned memory, ask more and do alignment ourself */
 826	if (sch != p) {
 827		kfree(p);
 828		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
 829				 netdev_queue_numa_node_read(dev_queue));
 830		if (!p)
 831			goto errout;
 832		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 833		sch->padded = (char *) sch - (char *) p;
 834	}
 835	__skb_queue_head_init(&sch->gso_skb);
 836	__skb_queue_head_init(&sch->skb_bad_txq);
 837	qdisc_skb_head_init(&sch->q);
 838	spin_lock_init(&sch->q.lock);
 839
 840	if (ops->static_flags & TCQ_F_CPUSTATS) {
 841		sch->cpu_bstats =
 842			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
 843		if (!sch->cpu_bstats)
 844			goto errout1;
 845
 846		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 847		if (!sch->cpu_qstats) {
 848			free_percpu(sch->cpu_bstats);
 849			goto errout1;
 850		}
 851	}
 852
 853	spin_lock_init(&sch->busylock);
 854	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 855	spin_lock_init(&sch->seqlock);
 856	seqcount_init(&sch->running);
 857
 858	sch->ops = ops;
 859	sch->flags = ops->static_flags;
 860	sch->enqueue = ops->enqueue;
 861	sch->dequeue = ops->dequeue;
 862	sch->dev_queue = dev_queue;
 863	sch->empty = true;
 864	dev_hold(dev);
 865	refcount_set(&sch->refcnt, 1);
 866
 867	if (sch != &noop_qdisc) {
 868		lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
 869		lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
 870		lockdep_set_class(&sch->running, &dev->qdisc_running_key);
 871	}
 872
 873	return sch;
 874errout1:
 875	kfree(p);
 876errout:
 877	return ERR_PTR(err);
 878}
 879
 880struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 881				const struct Qdisc_ops *ops,
 882				unsigned int parentid,
 883				struct netlink_ext_ack *extack)
 884{
 885	struct Qdisc *sch;
 886
 887	if (!try_module_get(ops->owner)) {
 888		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
 889		return NULL;
 890	}
 891
 892	sch = qdisc_alloc(dev_queue, ops, extack);
 893	if (IS_ERR(sch)) {
 894		module_put(ops->owner);
 895		return NULL;
 896	}
 897	sch->parent = parentid;
 898
 899	if (!ops->init || ops->init(sch, NULL, extack) == 0)
 900		return sch;
 901
 902	qdisc_put(sch);
 903	return NULL;
 904}
 905EXPORT_SYMBOL(qdisc_create_dflt);
 906
 907/* Under qdisc_lock(qdisc) and BH! */
 908
 909void qdisc_reset(struct Qdisc *qdisc)
 910{
 911	const struct Qdisc_ops *ops = qdisc->ops;
 912	struct sk_buff *skb, *tmp;
 913
 914	if (ops->reset)
 915		ops->reset(qdisc);
 916
 917	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
 918		__skb_unlink(skb, &qdisc->gso_skb);
 919		kfree_skb_list(skb);
 920	}
 921
 922	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
 923		__skb_unlink(skb, &qdisc->skb_bad_txq);
 924		kfree_skb_list(skb);
 925	}
 926
 927	qdisc->q.qlen = 0;
 928	qdisc->qstats.backlog = 0;
 929}
 930EXPORT_SYMBOL(qdisc_reset);
 931
 932void qdisc_free(struct Qdisc *qdisc)
 933{
 934	if (qdisc_is_percpu_stats(qdisc)) {
 935		free_percpu(qdisc->cpu_bstats);
 936		free_percpu(qdisc->cpu_qstats);
 937	}
 938
 939	kfree((char *) qdisc - qdisc->padded);
 940}
 941
 942static void qdisc_free_cb(struct rcu_head *head)
 943{
 944	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
 945
 946	qdisc_free(q);
 947}
 948
 949static void qdisc_destroy(struct Qdisc *qdisc)
 950{
 951	const struct Qdisc_ops  *ops = qdisc->ops;
 952	struct sk_buff *skb, *tmp;
 953
 954#ifdef CONFIG_NET_SCHED
 955	qdisc_hash_del(qdisc);
 956
 957	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 958#endif
 959	gen_kill_estimator(&qdisc->rate_est);
 960	if (ops->reset)
 961		ops->reset(qdisc);
 962	if (ops->destroy)
 963		ops->destroy(qdisc);
 964
 965	module_put(ops->owner);
 966	dev_put(qdisc_dev(qdisc));
 967
 968	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
 969		__skb_unlink(skb, &qdisc->gso_skb);
 970		kfree_skb_list(skb);
 971	}
 972
 973	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
 974		__skb_unlink(skb, &qdisc->skb_bad_txq);
 975		kfree_skb_list(skb);
 976	}
 977
 978	call_rcu(&qdisc->rcu, qdisc_free_cb);
 979}
 980
 981void qdisc_put(struct Qdisc *qdisc)
 982{
 983	if (!qdisc)
 984		return;
 985
 986	if (qdisc->flags & TCQ_F_BUILTIN ||
 987	    !refcount_dec_and_test(&qdisc->refcnt))
 988		return;
 989
 990	qdisc_destroy(qdisc);
 991}
 992EXPORT_SYMBOL(qdisc_put);
 993
 994/* Version of qdisc_put() that is called with rtnl mutex unlocked.
 995 * Intended to be used as optimization, this function only takes rtnl lock if
 996 * qdisc reference counter reached zero.
 997 */
 998
 999void qdisc_put_unlocked(struct Qdisc *qdisc)
1000{
1001	if (qdisc->flags & TCQ_F_BUILTIN ||
1002	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1003		return;
1004
1005	qdisc_destroy(qdisc);
1006	rtnl_unlock();
1007}
1008EXPORT_SYMBOL(qdisc_put_unlocked);
1009
1010/* Attach toplevel qdisc to device queue. */
1011struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1012			      struct Qdisc *qdisc)
1013{
1014	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
1015	spinlock_t *root_lock;
1016
1017	root_lock = qdisc_lock(oqdisc);
1018	spin_lock_bh(root_lock);
1019
1020	/* ... and graft new one */
1021	if (qdisc == NULL)
1022		qdisc = &noop_qdisc;
1023	dev_queue->qdisc_sleeping = qdisc;
1024	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1025
1026	spin_unlock_bh(root_lock);
1027
1028	return oqdisc;
1029}
1030EXPORT_SYMBOL(dev_graft_qdisc);
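/* Note that dev_graft_qdisc() only installs the new qdisc as
 * qdisc_sleeping; the active dev_queue->qdisc pointer is parked on
 * &noop_qdisc until dev_activate() -> transition_one_qdisc() swaps the
 * sleeping qdisc back in.
 */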
1031
1032static void attach_one_default_qdisc(struct net_device *dev,
1033				     struct netdev_queue *dev_queue,
1034				     void *_unused)
1035{
1036	struct Qdisc *qdisc;
1037	const struct Qdisc_ops *ops = default_qdisc_ops;
1038
1039	if (dev->priv_flags & IFF_NO_QUEUE)
1040		ops = &noqueue_qdisc_ops;
1041	else if(dev->type == ARPHRD_CAN)
1042		ops = &pfifo_fast_ops;
1043
1044	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1045	if (!qdisc) {
1046		netdev_info(dev, "activation failed\n");
1047		return;
1048	}
1049	if (!netif_is_multiqueue(dev))
1050		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1051	dev_queue->qdisc_sleeping = qdisc;
1052}
1053
1054static void attach_default_qdiscs(struct net_device *dev)
1055{
1056	struct netdev_queue *txq;
1057	struct Qdisc *qdisc;
1058
1059	txq = netdev_get_tx_queue(dev, 0);
1060
1061	if (!netif_is_multiqueue(dev) ||
1062	    dev->priv_flags & IFF_NO_QUEUE) {
1063		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1064		dev->qdisc = txq->qdisc_sleeping;
1065		qdisc_refcount_inc(dev->qdisc);
1066	} else {
1067		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1068		if (qdisc) {
1069			dev->qdisc = qdisc;
1070			qdisc->ops->attach(qdisc);
1071		}
1072	}
1073#ifdef CONFIG_NET_SCHED
1074	if (dev->qdisc != &noop_qdisc)
1075		qdisc_hash_add(dev->qdisc, false);
1076#endif
1077}
1078
1079static void transition_one_qdisc(struct net_device *dev,
1080				 struct netdev_queue *dev_queue,
1081				 void *_need_watchdog)
1082{
1083	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
1084	int *need_watchdog_p = _need_watchdog;
1085
1086	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1087		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1088
1089	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1090	if (need_watchdog_p) {
1091		dev_queue->trans_start = 0;
1092		*need_watchdog_p = 1;
1093	}
1094}
1095
1096void dev_activate(struct net_device *dev)
1097{
1098	int need_watchdog;
1099
1100	/* No queueing discipline is attached to device;
1101	 * create default one for devices, which need queueing
1102	 * and noqueue_qdisc for virtual interfaces
1103	 */
1104
1105	if (dev->qdisc == &noop_qdisc)
1106		attach_default_qdiscs(dev);
1107
1108	if (!netif_carrier_ok(dev))
1109		/* Delay activation until next carrier-on event */
1110		return;
1111
1112	need_watchdog = 0;
1113	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1114	if (dev_ingress_queue(dev))
1115		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1116
1117	if (need_watchdog) {
1118		netif_trans_update(dev);
1119		dev_watchdog_up(dev);
1120	}
1121}
1122EXPORT_SYMBOL(dev_activate);
1123
1124static void dev_deactivate_queue(struct net_device *dev,
1125				 struct netdev_queue *dev_queue,
1126				 void *_qdisc_default)
1127{
1128	struct Qdisc *qdisc_default = _qdisc_default;
1129	struct Qdisc *qdisc;
1130
1131	qdisc = rtnl_dereference(dev_queue->qdisc);
1132	if (qdisc) {
1133		bool nolock = qdisc->flags & TCQ_F_NOLOCK;
1134
1135		if (nolock)
1136			spin_lock_bh(&qdisc->seqlock);
1137		spin_lock_bh(qdisc_lock(qdisc));
1138
1139		if (!(qdisc->flags & TCQ_F_BUILTIN))
1140			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1141
1142		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1143		qdisc_reset(qdisc);
1144
1145		spin_unlock_bh(qdisc_lock(qdisc));
1146		if (nolock)
1147			spin_unlock_bh(&qdisc->seqlock);
1148	}
1149}
1150
1151static bool some_qdisc_is_busy(struct net_device *dev)
1152{
1153	unsigned int i;
1154
1155	for (i = 0; i < dev->num_tx_queues; i++) {
1156		struct netdev_queue *dev_queue;
1157		spinlock_t *root_lock;
1158		struct Qdisc *q;
1159		int val;
1160
1161		dev_queue = netdev_get_tx_queue(dev, i);
1162		q = dev_queue->qdisc_sleeping;
1163
1164		root_lock = qdisc_lock(q);
1165		spin_lock_bh(root_lock);
1166
1167		val = (qdisc_is_running(q) ||
1168		       test_bit(__QDISC_STATE_SCHED, &q->state));
1169
1170		spin_unlock_bh(root_lock);
1171
1172		if (val)
1173			return true;
1174	}
1175	return false;
1176}
1177
1178static void dev_qdisc_reset(struct net_device *dev,
1179			    struct netdev_queue *dev_queue,
1180			    void *none)
1181{
1182	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1183
1184	if (qdisc)
1185		qdisc_reset(qdisc);
1186}
1187
1188/**
1189 * 	dev_deactivate_many - deactivate transmissions on several devices
1190 * 	@head: list of devices to deactivate
1191 *
1192 *	This function returns only when all outstanding transmissions
1193 *	have completed, unless all devices are in dismantle phase.
1194 */
1195void dev_deactivate_many(struct list_head *head)
1196{
1197	struct net_device *dev;
1198
1199	list_for_each_entry(dev, head, close_list) {
1200		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1201					 &noop_qdisc);
1202		if (dev_ingress_queue(dev))
1203			dev_deactivate_queue(dev, dev_ingress_queue(dev),
1204					     &noop_qdisc);
1205
1206		dev_watchdog_down(dev);
1207	}
1208
1209	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
1210	 * This is avoided if all devices are in dismantle phase :
1211	 * Caller will call synchronize_net() for us
1212	 */
1213	synchronize_net();
1214
1215	/* Wait for outstanding qdisc_run calls. */
1216	list_for_each_entry(dev, head, close_list) {
1217		while (some_qdisc_is_busy(dev))
1218			yield();
1219		/* The new qdisc is assigned at this point so we can safely
1220		 * unwind stale skb lists and qdisc statistics
1221		 */
1222		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
1223		if (dev_ingress_queue(dev))
1224			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
1225	}
1226}
1227
1228void dev_deactivate(struct net_device *dev)
1229{
1230	LIST_HEAD(single);
1231
1232	list_add(&dev->close_list, &single);
1233	dev_deactivate_many(&single);
1234	list_del(&single);
1235}
1236EXPORT_SYMBOL(dev_deactivate);
1237
1238static int qdisc_change_tx_queue_len(struct net_device *dev,
1239				     struct netdev_queue *dev_queue)
1240{
1241	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1242	const struct Qdisc_ops *ops = qdisc->ops;
1243
1244	if (ops->change_tx_queue_len)
1245		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1246	return 0;
1247}
1248
1249int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1250{
1251	bool up = dev->flags & IFF_UP;
1252	unsigned int i;
1253	int ret = 0;
1254
1255	if (up)
1256		dev_deactivate(dev);
1257
1258	for (i = 0; i < dev->num_tx_queues; i++) {
1259		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1260
1261		/* TODO: revert changes on a partial failure */
1262		if (ret)
1263			break;
1264	}
1265
1266	if (up)
1267		dev_activate(dev);
1268	return ret;
1269}
1270
1271static void dev_init_scheduler_queue(struct net_device *dev,
1272				     struct netdev_queue *dev_queue,
1273				     void *_qdisc)
1274{
1275	struct Qdisc *qdisc = _qdisc;
1276
1277	rcu_assign_pointer(dev_queue->qdisc, qdisc);
1278	dev_queue->qdisc_sleeping = qdisc;
1279}
1280
1281void dev_init_scheduler(struct net_device *dev)
1282{
1283	dev->qdisc = &noop_qdisc;
1284	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1285	if (dev_ingress_queue(dev))
1286		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1287
1288	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1289}
1290
1291static void shutdown_scheduler_queue(struct net_device *dev,
1292				     struct netdev_queue *dev_queue,
1293				     void *_qdisc_default)
1294{
1295	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1296	struct Qdisc *qdisc_default = _qdisc_default;
1297
1298	if (qdisc) {
1299		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1300		dev_queue->qdisc_sleeping = qdisc_default;
1301
1302		qdisc_put(qdisc);
1303	}
1304}
1305
1306void dev_shutdown(struct net_device *dev)
1307{
1308	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1309	if (dev_ingress_queue(dev))
1310		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1311	qdisc_put(dev->qdisc);
1312	dev->qdisc = &noop_qdisc;
1313
1314	WARN_ON(timer_pending(&dev->watchdog_timer));
1315}
1316
1317void psched_ratecfg_precompute(struct psched_ratecfg *r,
1318			       const struct tc_ratespec *conf,
1319			       u64 rate64)
1320{
1321	memset(r, 0, sizeof(*r));
1322	r->overhead = conf->overhead;
1323	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1324	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1325	r->mult = 1;
1326	/*
1327	 * The deal here is to replace a divide by a reciprocal one
1328	 * in fast path (a reciprocal divide is a multiply and a shift)
1329	 *
1330	 * Normal formula would be :
1331	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1332	 *
1333	 * We compute mult/shift to use instead :
1334	 *  time_in_ns = (len * mult) >> shift;
1335	 *
1336	 * We try to get the highest possible mult value for accuracy,
1337	 * but have to make sure no overflows will ever happen.
1338	 */
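	/* Worked example: for a 1 Mbit/s rate, rate_bytes_ps = 125000 and
	 * the loop below ends with mult = 4194304000, shift = 19, so the
	 * fast path computes (len * 4194304000) >> 19 == len * 8000 ns,
	 * exactly NSEC_PER_SEC * len / 125000.
	 */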
1339	if (r->rate_bytes_ps > 0) {
1340		u64 factor = NSEC_PER_SEC;
1341
1342		for (;;) {
1343			r->mult = div64_u64(factor, r->rate_bytes_ps);
1344			if (r->mult & (1U << 31) || factor & (1ULL << 63))
1345				break;
1346			factor <<= 1;
1347			r->shift++;
1348		}
1349	}
1350}
1351EXPORT_SYMBOL(psched_ratecfg_precompute);
1352
1353static void mini_qdisc_rcu_func(struct rcu_head *head)
1354{
1355}
1356
1357void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1358			  struct tcf_proto *tp_head)
1359{
1360	/* Protected with chain0->filter_chain_lock.
1361	 * Can't access chain directly because tp_head can be NULL.
1362	 */
1363	struct mini_Qdisc *miniq_old =
1364		rcu_dereference_protected(*miniqp->p_miniq, 1);
1365	struct mini_Qdisc *miniq;
1366
1367	if (!tp_head) {
1368		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1369		/* Wait for flying RCU callback before it is freed. */
1370		rcu_barrier();
1371		return;
1372	}
1373
1374	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
1375		&miniqp->miniq1 : &miniqp->miniq2;
1376
1377	/* We need to make sure that readers won't see the miniq
1378	 * we are about to modify. So wait until previous call_rcu callback
1379	 * is done.
1380	 */
1381	rcu_barrier();
1382	miniq->filter_list = tp_head;
1383	rcu_assign_pointer(*miniqp->p_miniq, miniq);
1384
1385	if (miniq_old)
1386		/* This is counterpart of the rcu barriers above. We need to
1387		 * block potential new user of miniq_old until all readers
1388		 * are not seeing it.
1389		 */
1390		call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
1391}
1392EXPORT_SYMBOL(mini_qdisc_pair_swap);
1393
1394void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1395			  struct mini_Qdisc __rcu **p_miniq)
1396{
1397	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1398	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1399	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1400	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1401	miniqp->p_miniq = p_miniq;
1402}
1403EXPORT_SYMBOL(mini_qdisc_pair_init);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_generic.c	Generic packet scheduler routines.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
   7 *              - Ingress support
   8 */
   9
  10#include <linux/bitops.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/string.h>
  16#include <linux/errno.h>
  17#include <linux/netdevice.h>
  18#include <linux/skbuff.h>
  19#include <linux/rtnetlink.h>
  20#include <linux/init.h>
  21#include <linux/rcupdate.h>
  22#include <linux/list.h>
  23#include <linux/slab.h>
  24#include <linux/if_vlan.h>
  25#include <linux/skb_array.h>
  26#include <linux/if_macvlan.h>
  27#include <net/sch_generic.h>
  28#include <net/pkt_sched.h>
  29#include <net/dst.h>
  30#include <trace/events/qdisc.h>
  31#include <trace/events/net.h>
  32#include <net/xfrm.h>
  33
  34/* Qdisc to use by default */
  35const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
  36EXPORT_SYMBOL(default_qdisc_ops);
  37
  38static void qdisc_maybe_clear_missed(struct Qdisc *q,
  39				     const struct netdev_queue *txq)
  40{
  41	clear_bit(__QDISC_STATE_MISSED, &q->state);
  42
  43	/* Make sure the below netif_xmit_frozen_or_stopped()
  44	 * checking happens after clearing STATE_MISSED.
  45	 */
  46	smp_mb__after_atomic();
  47
  48	/* Checking netif_xmit_frozen_or_stopped() again to
  49	 * make sure STATE_MISSED is set if the STATE_MISSED
  50	 * set by netif_tx_wake_queue()'s rescheduling of
  51	 * net_tx_action() is cleared by the above clear_bit().
  52	 */
  53	if (!netif_xmit_frozen_or_stopped(txq))
  54		set_bit(__QDISC_STATE_MISSED, &q->state);
  55	else
  56		set_bit(__QDISC_STATE_DRAINING, &q->state);
  57}
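/* __QDISC_STATE_MISSED / __QDISC_STATE_DRAINING only matter for
 * TCQ_F_NOLOCK qdiscs: instead of unconditionally calling
 * __netif_schedule(), contended paths set MISSED so the qdisc gets
 * re-run once the current owner is done (see dev_requeue_skb() and
 * __qdisc_run() below).
 */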
  58
  59/* Main transmission queue. */
  60
  61/* Modifications to data participating in scheduling must be protected with
  62 * qdisc_lock(qdisc) spinlock.
  63 *
  64 * The idea is the following:
  65 * - enqueue, dequeue are serialized via qdisc root lock
  66 * - ingress filtering is also serialized via qdisc root lock
  67 * - updates to tree and tree walking are only done under the rtnl mutex.
  68 */
  69
  70#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
  71
  72static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
  73{
  74	const struct netdev_queue *txq = q->dev_queue;
  75	spinlock_t *lock = NULL;
  76	struct sk_buff *skb;
  77
  78	if (q->flags & TCQ_F_NOLOCK) {
  79		lock = qdisc_lock(q);
  80		spin_lock(lock);
  81	}
  82
  83	skb = skb_peek(&q->skb_bad_txq);
  84	if (skb) {
  85		/* check the reason of requeuing without tx lock first */
  86		txq = skb_get_tx_queue(txq->dev, skb);
  87		if (!netif_xmit_frozen_or_stopped(txq)) {
  88			skb = __skb_dequeue(&q->skb_bad_txq);
  89			if (qdisc_is_percpu_stats(q)) {
  90				qdisc_qstats_cpu_backlog_dec(q, skb);
  91				qdisc_qstats_cpu_qlen_dec(q);
  92			} else {
  93				qdisc_qstats_backlog_dec(q, skb);
  94				q->q.qlen--;
  95			}
  96		} else {
  97			skb = SKB_XOFF_MAGIC;
  98			qdisc_maybe_clear_missed(q, txq);
  99		}
 100	}
 101
 102	if (lock)
 103		spin_unlock(lock);
 104
 105	return skb;
 106}
 107
 108static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
 109{
 110	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
 111
 112	if (unlikely(skb))
 113		skb = __skb_dequeue_bad_txq(q);
 114
 115	return skb;
 116}
 117
 118static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 119					     struct sk_buff *skb)
 120{
 121	spinlock_t *lock = NULL;
 122
 123	if (q->flags & TCQ_F_NOLOCK) {
 124		lock = qdisc_lock(q);
 125		spin_lock(lock);
 126	}
 127
 128	__skb_queue_tail(&q->skb_bad_txq, skb);
 129
 130	if (qdisc_is_percpu_stats(q)) {
 131		qdisc_qstats_cpu_backlog_inc(q, skb);
 132		qdisc_qstats_cpu_qlen_inc(q);
 133	} else {
 134		qdisc_qstats_backlog_inc(q, skb);
 135		q->q.qlen++;
 136	}
 137
 138	if (lock)
 139		spin_unlock(lock);
 140}
 141
 142static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 143{
 144	spinlock_t *lock = NULL;
 145
 146	if (q->flags & TCQ_F_NOLOCK) {
 147		lock = qdisc_lock(q);
 148		spin_lock(lock);
 149	}
 150
 151	while (skb) {
 152		struct sk_buff *next = skb->next;
 153
 154		__skb_queue_tail(&q->gso_skb, skb);
 155
 156		/* it's still part of the queue */
 157		if (qdisc_is_percpu_stats(q)) {
 158			qdisc_qstats_cpu_requeues_inc(q);
 159			qdisc_qstats_cpu_backlog_inc(q, skb);
 160			qdisc_qstats_cpu_qlen_inc(q);
 161		} else {
 162			q->qstats.requeues++;
 163			qdisc_qstats_backlog_inc(q, skb);
 164			q->q.qlen++;
 165		}
 166
 167		skb = next;
 168	}
 169
 170	if (lock) {
 171		spin_unlock(lock);
 172		set_bit(__QDISC_STATE_MISSED, &q->state);
 173	} else {
 174		__netif_schedule(q);
 175	}
 176}
 177
 178static void try_bulk_dequeue_skb(struct Qdisc *q,
 179				 struct sk_buff *skb,
 180				 const struct netdev_queue *txq,
 181				 int *packets)
 182{
 183	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 184
 185	while (bytelimit > 0) {
 186		struct sk_buff *nskb = q->dequeue(q);
 187
 188		if (!nskb)
 189			break;
 190
 191		bytelimit -= nskb->len; /* covers GSO len */
 192		skb->next = nskb;
 193		skb = nskb;
 194		(*packets)++; /* GSO counts as one pkt */
 195	}
 196	skb_mark_not_on_list(skb);
 197}
 198
 199/* This variant of try_bulk_dequeue_skb() makes sure
 200 * all skbs in the chain are for the same txq
 201 */
 202static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 203				      struct sk_buff *skb,
 204				      int *packets)
 205{
 206	int mapping = skb_get_queue_mapping(skb);
 207	struct sk_buff *nskb;
 208	int cnt = 0;
 209
 210	do {
 211		nskb = q->dequeue(q);
 212		if (!nskb)
 213			break;
 214		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 215			qdisc_enqueue_skb_bad_txq(q, nskb);
 216			break;
 217		}
 218		skb->next = nskb;
 219		skb = nskb;
 220	} while (++cnt < 8);
 221	(*packets) += cnt;
 222	skb_mark_not_on_list(skb);
 223}
 224
 225/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 226 * A requeued skb (via q->gso_skb) can also be a SKB list.
 227 */
 228static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 229				   int *packets)
 230{
 231	const struct netdev_queue *txq = q->dev_queue;
 232	struct sk_buff *skb = NULL;
 233
 234	*packets = 1;
 235	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
 236		spinlock_t *lock = NULL;
 237
 238		if (q->flags & TCQ_F_NOLOCK) {
 239			lock = qdisc_lock(q);
 240			spin_lock(lock);
 241		}
 242
 243		skb = skb_peek(&q->gso_skb);
 244
 245		/* skb may be null if another cpu pulls gso_skb off in between
 246		 * empty check and lock.
 247		 */
 248		if (!skb) {
 249			if (lock)
 250				spin_unlock(lock);
 251			goto validate;
 252		}
 253
 254		/* skb in gso_skb were already validated */
 255		*validate = false;
 256		if (xfrm_offload(skb))
 257			*validate = true;
 258		/* check the reason of requeuing without tx lock first */
 259		txq = skb_get_tx_queue(txq->dev, skb);
 260		if (!netif_xmit_frozen_or_stopped(txq)) {
 261			skb = __skb_dequeue(&q->gso_skb);
 262			if (qdisc_is_percpu_stats(q)) {
 263				qdisc_qstats_cpu_backlog_dec(q, skb);
 264				qdisc_qstats_cpu_qlen_dec(q);
 265			} else {
 266				qdisc_qstats_backlog_dec(q, skb);
 267				q->q.qlen--;
 268			}
 269		} else {
 270			skb = NULL;
 271			qdisc_maybe_clear_missed(q, txq);
 272		}
 273		if (lock)
 274			spin_unlock(lock);
 275		goto trace;
 276	}
 277validate:
 278	*validate = true;
 279
 280	if ((q->flags & TCQ_F_ONETXQUEUE) &&
 281	    netif_xmit_frozen_or_stopped(txq)) {
 282		qdisc_maybe_clear_missed(q, txq);
 283		return skb;
 284	}
 285
 286	skb = qdisc_dequeue_skb_bad_txq(q);
 287	if (unlikely(skb)) {
 288		if (skb == SKB_XOFF_MAGIC)
 289			return NULL;
 290		goto bulk;
 291	}
 292	skb = q->dequeue(q);
 293	if (skb) {
 294bulk:
 295		if (qdisc_may_bulk(q))
 296			try_bulk_dequeue_skb(q, skb, txq, packets);
 297		else
 298			try_bulk_dequeue_skb_slow(q, skb, packets);
 299	}
 300trace:
 301	trace_qdisc_dequeue(q, txq, *packets, skb);
 302	return skb;
 303}
 304
 305/*
 306 * Transmit possibly several skbs, and handle the return status as
 307 * required. Owning qdisc running bit guarantees that only one CPU
 308 * can execute this function.
 309 *
 310 * Returns to the caller:
 311 *				false  - hardware queue frozen backoff
 312 *				true   - feel free to send more pkts
 313 */
 314bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 315		     struct net_device *dev, struct netdev_queue *txq,
 316		     spinlock_t *root_lock, bool validate)
 317{
 318	int ret = NETDEV_TX_BUSY;
 319	bool again = false;
 320
 321	/* And release qdisc */
 322	if (root_lock)
 323		spin_unlock(root_lock);
 324
 325	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 326	if (validate)
 327		skb = validate_xmit_skb_list(skb, dev, &again);
 328
 329#ifdef CONFIG_XFRM_OFFLOAD
 330	if (unlikely(again)) {
 331		if (root_lock)
 332			spin_lock(root_lock);
 333
 334		dev_requeue_skb(skb, q);
 335		return false;
 336	}
 337#endif
 338
 339	if (likely(skb)) {
 340		HARD_TX_LOCK(dev, txq, smp_processor_id());
 341		if (!netif_xmit_frozen_or_stopped(txq))
 342			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 343		else
 344			qdisc_maybe_clear_missed(q, txq);
 345
 346		HARD_TX_UNLOCK(dev, txq);
 347	} else {
 348		if (root_lock)
 349			spin_lock(root_lock);
 350		return true;
 351	}
 352
 353	if (root_lock)
 354		spin_lock(root_lock);
 355
 356	if (!dev_xmit_complete(ret)) {
 357		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 358		if (unlikely(ret != NETDEV_TX_BUSY))
 359			net_warn_ratelimited("BUG %s code %d qlen %d\n",
 360					     dev->name, ret, q->q.qlen);
 361
 362		dev_requeue_skb(skb, q);
 363		return false;
 364	}
 365
 366	return true;
 367}
 368
 369/*
 370 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 371 *
 372 * running seqcount guarantees only one CPU can process
 373 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 374 * this queue.
 375 *
 376 *  netif_tx_lock serializes accesses to device driver.
 377 *
 378 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 379 *  if one is grabbed, another must be free.
 380 *
 381 * Note, that this procedure can be called by a watchdog timer
 382 *
 383 * Returns to the caller:
 384 *				0  - queue is empty or throttled.
 385 *				>0 - queue is not empty.
 386 *
 387 */
 388static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 389{
 390	spinlock_t *root_lock = NULL;
 391	struct netdev_queue *txq;
 392	struct net_device *dev;
 393	struct sk_buff *skb;
 394	bool validate;
 395
 396	/* Dequeue packet */
 397	skb = dequeue_skb(q, &validate, packets);
 398	if (unlikely(!skb))
 399		return false;
 400
 401	if (!(q->flags & TCQ_F_NOLOCK))
 402		root_lock = qdisc_lock(q);
 403
 404	dev = qdisc_dev(q);
 405	txq = skb_get_tx_queue(dev, skb);
 406
 407	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
 408}
 409
 410void __qdisc_run(struct Qdisc *q)
 411{
 412	int quota = READ_ONCE(dev_tx_weight);
 413	int packets;
 414
 415	while (qdisc_restart(q, &packets)) {
 416		quota -= packets;
 417		if (quota <= 0) {
 418			if (q->flags & TCQ_F_NOLOCK)
 419				set_bit(__QDISC_STATE_MISSED, &q->state);
 420			else
 421				__netif_schedule(q);
 422
 423			break;
 424		}
 425	}
 426}
 427
 428unsigned long dev_trans_start(struct net_device *dev)
 429{
 430	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
 431	unsigned long val;
 432	unsigned int i;
 433
 434	for (i = 1; i < dev->num_tx_queues; i++) {
 435		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
 436		if (val && time_after(val, res))
 437			res = val;
 438	}
 439
 440	return res;
 441}
 442EXPORT_SYMBOL(dev_trans_start);
 443
 444static void netif_freeze_queues(struct net_device *dev)
 445{
 446	unsigned int i;
 447	int cpu;
 448
 449	cpu = smp_processor_id();
 450	for (i = 0; i < dev->num_tx_queues; i++) {
 451		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 452
 453		/* We are the only thread of execution doing a
 454		 * freeze, but we have to grab the _xmit_lock in
 455		 * order to synchronize with threads which are in
 456		 * the ->hard_start_xmit() handler and already
 457		 * checked the frozen bit.
 458		 */
 459		__netif_tx_lock(txq, cpu);
 460		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 461		__netif_tx_unlock(txq);
 462	}
 463}
 464
 465void netif_tx_lock(struct net_device *dev)
 466{
 467	spin_lock(&dev->tx_global_lock);
 468	netif_freeze_queues(dev);
 469}
 470EXPORT_SYMBOL(netif_tx_lock);
 471
 472static void netif_unfreeze_queues(struct net_device *dev)
 473{
 474	unsigned int i;
 475
 476	for (i = 0; i < dev->num_tx_queues; i++) {
 477		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 478
 479		/* No need to grab the _xmit_lock here.  If the
 480		 * queue is not stopped for another reason, we
 481		 * force a schedule.
 482		 */
 483		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 484		netif_schedule_queue(txq);
 485	}
 486}
 487
 488void netif_tx_unlock(struct net_device *dev)
 489{
 490	netif_unfreeze_queues(dev);
 491	spin_unlock(&dev->tx_global_lock);
 492}
 493EXPORT_SYMBOL(netif_tx_unlock);
 494
 495static void dev_watchdog(struct timer_list *t)
 496{
 497	struct net_device *dev = from_timer(dev, t, watchdog_timer);
 498	bool release = true;
 499
 500	spin_lock(&dev->tx_global_lock);
 501	if (!qdisc_tx_is_noop(dev)) {
 502		if (netif_device_present(dev) &&
 503		    netif_running(dev) &&
 504		    netif_carrier_ok(dev)) {
 505			unsigned int timedout_ms = 0;
 506			unsigned int i;
 507			unsigned long trans_start;
 508
 509			for (i = 0; i < dev->num_tx_queues; i++) {
 510				struct netdev_queue *txq;
 511
 512				txq = netdev_get_tx_queue(dev, i);
 513				trans_start = READ_ONCE(txq->trans_start);
 514				if (netif_xmit_stopped(txq) &&
 515				    time_after(jiffies, (trans_start +
 516							 dev->watchdog_timeo))) {
 517					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
 518					atomic_long_inc(&txq->trans_timeout);
 519					break;
 520				}
 521			}
 522
 523			if (unlikely(timedout_ms)) {
 524				trace_net_dev_xmit_timeout(dev, i);
 525				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
 526					    raw_smp_processor_id(),
 527					    i, timedout_ms);
 528				netif_freeze_queues(dev);
 529				dev->netdev_ops->ndo_tx_timeout(dev, i);
 530				netif_unfreeze_queues(dev);
 531			}
 532			if (!mod_timer(&dev->watchdog_timer,
 533				       round_jiffies(jiffies +
 534						     dev->watchdog_timeo)))
 535				release = false;
 536		}
 537	}
 538	spin_unlock(&dev->tx_global_lock);
 539
 540	if (release)
 541		netdev_put(dev, &dev->watchdog_dev_tracker);
 542}
 543
 544void __netdev_watchdog_up(struct net_device *dev)
 545{
 546	if (dev->netdev_ops->ndo_tx_timeout) {
 547		if (dev->watchdog_timeo <= 0)
 548			dev->watchdog_timeo = 5*HZ;
 549		if (!mod_timer(&dev->watchdog_timer,
 550			       round_jiffies(jiffies + dev->watchdog_timeo)))
 551			netdev_hold(dev, &dev->watchdog_dev_tracker,
 552				    GFP_ATOMIC);
 553	}
 554}
 555EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
 556
 557static void dev_watchdog_up(struct net_device *dev)
 558{
 559	__netdev_watchdog_up(dev);
 560}
 561
 562static void dev_watchdog_down(struct net_device *dev)
 563{
 564	netif_tx_lock_bh(dev);
 565	if (del_timer(&dev->watchdog_timer))
 566		netdev_put(dev, &dev->watchdog_dev_tracker);
 567	netif_tx_unlock_bh(dev);
 568}
 569
 570/**
 571 *	netif_carrier_on - set carrier
 572 *	@dev: network device
 573 *
 574 * Device has detected acquisition of carrier.
 575 */
 576void netif_carrier_on(struct net_device *dev)
 577{
 578	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 579		if (dev->reg_state == NETREG_UNINITIALIZED)
 580			return;
 581		atomic_inc(&dev->carrier_up_count);
 582		linkwatch_fire_event(dev);
 583		if (netif_running(dev))
 584			__netdev_watchdog_up(dev);
 585	}
 586}
 587EXPORT_SYMBOL(netif_carrier_on);
 588
 589/**
 590 *	netif_carrier_off - clear carrier
 591 *	@dev: network device
 592 *
 593 * Device has detected loss of carrier.
 594 */
 595void netif_carrier_off(struct net_device *dev)
 596{
 597	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 598		if (dev->reg_state == NETREG_UNINITIALIZED)
 599			return;
 600		atomic_inc(&dev->carrier_down_count);
 601		linkwatch_fire_event(dev);
 602	}
 603}
 604EXPORT_SYMBOL(netif_carrier_off);
 605
 606/**
 607 *	netif_carrier_event - report carrier state event
 608 *	@dev: network device
 609 *
 610 * Device has detected a carrier event but the carrier state wasn't changed.
 611 * Use in drivers when querying carrier state asynchronously, to avoid missing
 612 * events (link flaps) if link recovers before it's queried.
 613 */
 614void netif_carrier_event(struct net_device *dev)
 615{
 616	if (dev->reg_state == NETREG_UNINITIALIZED)
 617		return;
 618	atomic_inc(&dev->carrier_up_count);
 619	atomic_inc(&dev->carrier_down_count);
 620	linkwatch_fire_event(dev);
 621}
 622EXPORT_SYMBOL_GPL(netif_carrier_event);
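/*
 * Illustrative sketch, not from sch_generic.c: how a driver's link-state
 * code would use the carrier helpers above.  foo_handle_link_change() and
 * foo_poll_link() are hypothetical; netif_carrier_on/off/event() are the
 * real API.
 */
static void foo_handle_link_change(struct net_device *dev, bool link_up)
{
	if (link_up)
		netif_carrier_on(dev);	/* fires linkwatch, re-arms TX watchdog */
	else
		netif_carrier_off(dev);	/* fires linkwatch, bumps carrier_down */
}

/* For drivers that only poll link state: if the link may have bounced down
 * and back up between two polls, report the flap explicitly so userspace
 * still sees the up/down counters move.
 */
static void foo_poll_link(struct net_device *dev, bool link_up, bool link_bounced)
{
	if (link_bounced && link_up)
		netif_carrier_event(dev);
	else
		foo_handle_link_change(dev, link_up);
}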
 623
 624/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 625   under all circumstances. It is difficult to invent anything faster or
 626   cheaper.
 627 */
 628
 629static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 630			struct sk_buff **to_free)
 631{
 632	__qdisc_drop(skb, to_free);
 633	return NET_XMIT_CN;
 634}
 635
 636static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
 637{
 638	return NULL;
 639}
 640
 641struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 642	.id		=	"noop",
 643	.priv_size	=	0,
 644	.enqueue	=	noop_enqueue,
 645	.dequeue	=	noop_dequeue,
 646	.peek		=	noop_dequeue,
 647	.owner		=	THIS_MODULE,
 648};
 649
 650static struct netdev_queue noop_netdev_queue = {
 651	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 652	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
 653};
 654
 655struct Qdisc noop_qdisc = {
 656	.enqueue	=	noop_enqueue,
 657	.dequeue	=	noop_dequeue,
 658	.flags		=	TCQ_F_BUILTIN,
 659	.ops		=	&noop_qdisc_ops,
 660	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 661	.dev_queue	=	&noop_netdev_queue,
 662	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 663	.gso_skb = {
 664		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
 665		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
 666		.qlen = 0,
 667		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
 668	},
 669	.skb_bad_txq = {
 670		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 671		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 672		.qlen = 0,
 673		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 674	},
 675};
 676EXPORT_SYMBOL(noop_qdisc);
 677
 678static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
 679			struct netlink_ext_ack *extack)
 680{
 681	/* register_qdisc() assigns a default of noop_enqueue if unset,
 682	 * but __dev_queue_xmit() treats noqueue only as such
 683	 * if this is NULL - so clear it here. */
 684	qdisc->enqueue = NULL;
 685	return 0;
 686}
 687
 688struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 689	.id		=	"noqueue",
 690	.priv_size	=	0,
 691	.init		=	noqueue_init,
 692	.enqueue	=	noop_enqueue,
 693	.dequeue	=	noop_dequeue,
 694	.peek		=	noop_dequeue,
 695	.owner		=	THIS_MODULE,
 696};
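/*
 * Why noqueue_init() clears ->enqueue: the transmit fast path in
 * net/core/dev.c branches on that pointer.  Simplified sketch of the
 * relevant logic in __dev_queue_xmit() (not verbatim):
 *
 *	q = rcu_dereference_bh(txq->qdisc);
 *	if (q->enqueue) {
 *		// normal case: go through the qdisc
 *		rc = __dev_xmit_skb(skb, q, dev, txq);
 *		goto out;
 *	}
 *	// q->enqueue == NULL: "noqueue" - transmit directly, no queueing
 */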
 697
 698const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
 699	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
 700};
 701EXPORT_SYMBOL(sch_default_prio2band);
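/*
 * Worked example of the map above (indices are TC_PRIO_* values from
 * include/uapi/linux/pkt_sched.h):
 *
 *	TC_PRIO_BESTEFFORT  (0) -> band 1
 *	TC_PRIO_BULK        (2) -> band 2   (lowest priority)
 *	TC_PRIO_INTERACTIVE (6) -> band 0   (highest priority)
 *	TC_PRIO_CONTROL     (7) -> band 0
 *
 * pfifo_fast below always drains band 0 before band 1 before band 2.
 */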
 702
 703/* 3-band FIFO queue: old style, but should be a bit faster than
 704   generic prio+fifo combination.
 705 */
 706
 707#define PFIFO_FAST_BANDS 3
 708
 709/*
 710 * Private data for a pfifo_fast scheduler containing:
 711 *	- rings for priority bands
 712 */
 713struct pfifo_fast_priv {
 714	struct skb_array q[PFIFO_FAST_BANDS];
 715};
 716
 717static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
 718					  int band)
 719{
 720	return &priv->q[band];
 721}
 722
 723static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 724			      struct sk_buff **to_free)
 725{
 726	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
 727	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 728	struct skb_array *q = band2list(priv, band);
 729	unsigned int pkt_len = qdisc_pkt_len(skb);
 730	int err;
 731
 732	err = skb_array_produce(q, skb);
 733
 734	if (unlikely(err)) {
 735		if (qdisc_is_percpu_stats(qdisc))
 736			return qdisc_drop_cpu(skb, qdisc, to_free);
 737		else
 738			return qdisc_drop(skb, qdisc, to_free);
 739	}
 740
 741	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 742	return NET_XMIT_SUCCESS;
 743}
 744
 745static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 746{
 747	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 748	struct sk_buff *skb = NULL;
 749	bool need_retry = true;
 750	int band;
 751
 752retry:
 753	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 754		struct skb_array *q = band2list(priv, band);
 755
 756		if (__skb_array_empty(q))
 757			continue;
 758
 759		skb = __skb_array_consume(q);
 760	}
 761	if (likely(skb)) {
 762		qdisc_update_stats_at_dequeue(qdisc, skb);
 763	} else if (need_retry &&
 764		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
 765		/* Delay clearing STATE_MISSED here to reduce the
 766		 * overhead of the second spin_trylock() in
 767		 * qdisc_run_begin() and of the __netif_schedule()
 768		 * call in qdisc_run_end().
 769		 */
 770		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
 771		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
 772
 773		/* Make sure dequeuing happens after clearing
 774		 * STATE_MISSED.
 775		 */
 776		smp_mb__after_atomic();
 777
 778		need_retry = false;
 779
 780		goto retry;
 781	}
 782
 783	return skb;
 784}
 785
 786static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 787{
 788	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 789	struct sk_buff *skb = NULL;
 790	int band;
 791
 792	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 793		struct skb_array *q = band2list(priv, band);
 794
 795		skb = __skb_array_peek(q);
 796	}
 797
 798	return skb;
 799}
 800
 801static void pfifo_fast_reset(struct Qdisc *qdisc)
 802{
 803	int i, band;
 804	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 805
 806	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
 807		struct skb_array *q = band2list(priv, band);
 808		struct sk_buff *skb;
 809
 810		/* A NULL ring is possible if the destroy path runs because
 811		 * skb_array_init() failed in pfifo_fast_init().
 812		 */
 813		if (!q->ring.queue)
 814			continue;
 815
 816		while ((skb = __skb_array_consume(q)) != NULL)
 817			kfree_skb(skb);
 818	}
 819
 820	if (qdisc_is_percpu_stats(qdisc)) {
 821		for_each_possible_cpu(i) {
 822			struct gnet_stats_queue *q;
 823
 824			q = per_cpu_ptr(qdisc->cpu_qstats, i);
 825			q->backlog = 0;
 826			q->qlen = 0;
 827		}
 828	}
 829}
 830
 831static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 832{
 833	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 834
 835	memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
 836	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 837		goto nla_put_failure;
 838	return skb->len;
 839
 840nla_put_failure:
 841	return -1;
 842}
 843
 844static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
 845			   struct netlink_ext_ack *extack)
 846{
 847	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 848	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 849	int prio;
 850
 851	/* guard against zero length rings */
 852	if (!qlen)
 853		return -EINVAL;
 854
 855	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 856		struct skb_array *q = band2list(priv, prio);
 857		int err;
 858
 859		err = skb_array_init(q, qlen, GFP_KERNEL);
 860		if (err)
 861			return -ENOMEM;
 862	}
 863
 864	/* Can by-pass the queue discipline */
 865	qdisc->flags |= TCQ_F_CAN_BYPASS;
 866	return 0;
 867}
 868
 869static void pfifo_fast_destroy(struct Qdisc *sch)
 870{
 871	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 872	int prio;
 873
 874	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 875		struct skb_array *q = band2list(priv, prio);
 876
 877		/* A NULL ring is possible if the destroy path runs because
 878		 * skb_array_init() failed in pfifo_fast_init().
 879		 */
 880		if (!q->ring.queue)
 881			continue;
 882		/* Destroy ring but no need to kfree_skb because a call to
 883		 * pfifo_fast_reset() has already done that work.
 884		 */
 885		ptr_ring_cleanup(&q->ring, NULL);
 886	}
 887}
 888
 889static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
 890					  unsigned int new_len)
 891{
 892	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 893	struct skb_array *bands[PFIFO_FAST_BANDS];
 894	int prio;
 895
 896	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 897		struct skb_array *q = band2list(priv, prio);
 898
 899		bands[prio] = q;
 900	}
 901
 902	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
 903					 GFP_KERNEL);
 904}
 905
 906struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 907	.id		=	"pfifo_fast",
 908	.priv_size	=	sizeof(struct pfifo_fast_priv),
 909	.enqueue	=	pfifo_fast_enqueue,
 910	.dequeue	=	pfifo_fast_dequeue,
 911	.peek		=	pfifo_fast_peek,
 912	.init		=	pfifo_fast_init,
 913	.destroy	=	pfifo_fast_destroy,
 914	.reset		=	pfifo_fast_reset,
 915	.dump		=	pfifo_fast_dump,
 916	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
 917	.owner		=	THIS_MODULE,
 918	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 919};
 920EXPORT_SYMBOL(pfifo_fast_ops);
 921
 922static struct lock_class_key qdisc_tx_busylock;
 923
 924struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 925			  const struct Qdisc_ops *ops,
 926			  struct netlink_ext_ack *extack)
 927{
 928	struct Qdisc *sch;
 929	unsigned int size = sizeof(*sch) + ops->priv_size;
 930	int err = -ENOBUFS;
 931	struct net_device *dev;
 932
 933	if (!dev_queue) {
 934		NL_SET_ERR_MSG(extack, "No device queue given");
 935		err = -EINVAL;
 936		goto errout;
 937	}
 938
 939	dev = dev_queue->dev;
 940	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
 941
 942	if (!sch)
 943		goto errout;
 944	__skb_queue_head_init(&sch->gso_skb);
 945	__skb_queue_head_init(&sch->skb_bad_txq);
 946	gnet_stats_basic_sync_init(&sch->bstats);
 947	spin_lock_init(&sch->q.lock);
 948
 949	if (ops->static_flags & TCQ_F_CPUSTATS) {
 950		sch->cpu_bstats =
 951			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 952		if (!sch->cpu_bstats)
 953			goto errout1;
 954
 955		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 956		if (!sch->cpu_qstats) {
 957			free_percpu(sch->cpu_bstats);
 958			goto errout1;
 959		}
 960	}
 961
 962	spin_lock_init(&sch->busylock);
 963	lockdep_set_class(&sch->busylock,
 964			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 965
 966	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
 967	spin_lock_init(&sch->seqlock);
 968	lockdep_set_class(&sch->seqlock,
 969			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 970
 971	sch->ops = ops;
 972	sch->flags = ops->static_flags;
 973	sch->enqueue = ops->enqueue;
 974	sch->dequeue = ops->dequeue;
 975	sch->dev_queue = dev_queue;
 976	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
 977	refcount_set(&sch->refcnt, 1);
 978
 979	return sch;
 980errout1:
 981	kfree(sch);
 982errout:
 983	return ERR_PTR(err);
 984}
 985
 986struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 987				const struct Qdisc_ops *ops,
 988				unsigned int parentid,
 989				struct netlink_ext_ack *extack)
 990{
 991	struct Qdisc *sch;
 992
 993	if (!try_module_get(ops->owner)) {
 994		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
 995		return NULL;
 996	}
 997
 998	sch = qdisc_alloc(dev_queue, ops, extack);
 999	if (IS_ERR(sch)) {
1000		module_put(ops->owner);
1001		return NULL;
1002	}
1003	sch->parent = parentid;
1004
1005	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1006		trace_qdisc_create(ops, dev_queue->dev, parentid);
1007		return sch;
1008	}
1009
1010	qdisc_put(sch);
1011	return NULL;
1012}
1013EXPORT_SYMBOL(qdisc_create_dflt);
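/*
 * Illustrative sketch, not from sch_generic.c: the usual caller pattern for
 * qdisc_create_dflt(), loosely modelled on how classful qdiscs such as mq
 * attach a per-txq child.  example_attach_child() and the choice of
 * pfifo_fast_ops are assumptions for the example.
 */
static struct Qdisc *example_attach_child(struct Qdisc *sch, unsigned int ntx,
					  struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(qdisc_dev(sch), ntx);
	struct Qdisc *child;

	child = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
				  TC_H_MAKE(TC_H_MAJ(sch->handle),
					    TC_H_MIN(ntx + 1)), extack);
	if (!child)
		return NULL;	/* qdisc_create_dflt() returns NULL on failure */

	/* Mark as bound to a single TX queue with no visible parent, as mq does */
	child->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	return child;
}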
1014
1015/* Under qdisc_lock(qdisc) and BH! */
1016
1017void qdisc_reset(struct Qdisc *qdisc)
1018{
1019	const struct Qdisc_ops *ops = qdisc->ops;
1020
1021	trace_qdisc_reset(qdisc);
1022
1023	if (ops->reset)
1024		ops->reset(qdisc);
1025
1026	__skb_queue_purge(&qdisc->gso_skb);
1027	__skb_queue_purge(&qdisc->skb_bad_txq);
1028
1029	qdisc->q.qlen = 0;
1030	qdisc->qstats.backlog = 0;
1031}
1032EXPORT_SYMBOL(qdisc_reset);
1033
1034void qdisc_free(struct Qdisc *qdisc)
1035{
1036	if (qdisc_is_percpu_stats(qdisc)) {
1037		free_percpu(qdisc->cpu_bstats);
1038		free_percpu(qdisc->cpu_qstats);
1039	}
1040
1041	kfree(qdisc);
1042}
1043
1044static void qdisc_free_cb(struct rcu_head *head)
1045{
1046	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1047
1048	qdisc_free(q);
1049}
1050
1051static void __qdisc_destroy(struct Qdisc *qdisc)
1052{
1053	const struct Qdisc_ops  *ops = qdisc->ops;
1054	struct net_device *dev = qdisc_dev(qdisc);
1055
1056#ifdef CONFIG_NET_SCHED
1057	qdisc_hash_del(qdisc);
1058
1059	qdisc_put_stab(rtnl_dereference(qdisc->stab));
1060#endif
1061	gen_kill_estimator(&qdisc->rate_est);
1062
1063	qdisc_reset(qdisc);
1064
1065
1066	if (ops->destroy)
1067		ops->destroy(qdisc);
1068
1069	module_put(ops->owner);
1070	netdev_put(dev, &qdisc->dev_tracker);
1071
1072	trace_qdisc_destroy(qdisc);
1073
1074	call_rcu(&qdisc->rcu, qdisc_free_cb);
1075}
1076
1077void qdisc_destroy(struct Qdisc *qdisc)
1078{
1079	if (qdisc->flags & TCQ_F_BUILTIN)
1080		return;
1081
1082	__qdisc_destroy(qdisc);
1083}
1084
1085void qdisc_put(struct Qdisc *qdisc)
1086{
1087	if (!qdisc)
1088		return;
1089
1090	if (qdisc->flags & TCQ_F_BUILTIN ||
1091	    !refcount_dec_and_test(&qdisc->refcnt))
1092		return;
1093
1094	__qdisc_destroy(qdisc);
1095}
1096EXPORT_SYMBOL(qdisc_put);
1097
1098/* Version of qdisc_put() that is called with rtnl mutex unlocked.
1099 * Intended to be used as optimization, this function only takes rtnl lock if
1100 * qdisc reference counter reached zero.
1101 */
1102
1103void qdisc_put_unlocked(struct Qdisc *qdisc)
1104{
1105	if (qdisc->flags & TCQ_F_BUILTIN ||
1106	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1107		return;
1108
1109	__qdisc_destroy(qdisc);
1110	rtnl_unlock();
1111}
1112EXPORT_SYMBOL(qdisc_put_unlocked);
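/*
 * Illustrative sketch, not from sch_generic.c: the reference-counting
 * contract around qdisc_put()/qdisc_put_unlocked().  example_hold_and_drop()
 * is a hypothetical helper; qdisc_refcount_inc() is the real counterpart
 * from include/net/sch_generic.h.
 */
static void example_hold_and_drop(struct Qdisc *q)
{
	ASSERT_RTNL();

	qdisc_refcount_inc(q);	/* publish/keep an extra reference */

	/* ... use q ... */

	qdisc_put(q);		/* drops the reference; frees via RCU at zero.
				 * Builtin qdiscs (TCQ_F_BUILTIN) are never freed.
				 */
}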
1113
1114/* Attach toplevel qdisc to device queue. */
1115struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1116			      struct Qdisc *qdisc)
1117{
1118	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1119	spinlock_t *root_lock;
1120
1121	root_lock = qdisc_lock(oqdisc);
1122	spin_lock_bh(root_lock);
1123
1124	/* Graft the new qdisc (noop_qdisc if none was given) */
1125	if (qdisc == NULL)
1126		qdisc = &noop_qdisc;
1127	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1128	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1129
1130	spin_unlock_bh(root_lock);
1131
1132	return oqdisc;
1133}
1134EXPORT_SYMBOL(dev_graft_qdisc);
1135
1136static void shutdown_scheduler_queue(struct net_device *dev,
1137				     struct netdev_queue *dev_queue,
1138				     void *_qdisc_default)
1139{
1140	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1141	struct Qdisc *qdisc_default = _qdisc_default;
1142
1143	if (qdisc) {
1144		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1145		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
1146
1147		qdisc_put(qdisc);
1148	}
1149}
1150
1151static void attach_one_default_qdisc(struct net_device *dev,
1152				     struct netdev_queue *dev_queue,
1153				     void *_unused)
1154{
1155	struct Qdisc *qdisc;
1156	const struct Qdisc_ops *ops = default_qdisc_ops;
1157
1158	if (dev->priv_flags & IFF_NO_QUEUE)
1159		ops = &noqueue_qdisc_ops;
1160	else if (dev->type == ARPHRD_CAN)
1161		ops = &pfifo_fast_ops;
1162
1163	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1164	if (!qdisc)
1165		return;
1166
1167	if (!netif_is_multiqueue(dev))
1168		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1169	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1170}
1171
1172static void attach_default_qdiscs(struct net_device *dev)
1173{
1174	struct netdev_queue *txq;
1175	struct Qdisc *qdisc;
1176
1177	txq = netdev_get_tx_queue(dev, 0);
1178
1179	if (!netif_is_multiqueue(dev) ||
1180	    dev->priv_flags & IFF_NO_QUEUE) {
1181		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1182		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1183		rcu_assign_pointer(dev->qdisc, qdisc);
1184		qdisc_refcount_inc(qdisc);
1185	} else {
1186		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1187		if (qdisc) {
1188			rcu_assign_pointer(dev->qdisc, qdisc);
1189			qdisc->ops->attach(qdisc);
1190		}
1191	}
1192	qdisc = rtnl_dereference(dev->qdisc);
1193
1194	/* Detect that default qdisc setup/init failed and fall back to "noqueue" */
1195	if (qdisc == &noop_qdisc) {
1196		netdev_warn(dev, "default qdisc (%s) failed, falling back to %s\n",
1197			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
1198		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1199		dev->priv_flags |= IFF_NO_QUEUE;
1200		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1201		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1202		rcu_assign_pointer(dev->qdisc, qdisc);
1203		qdisc_refcount_inc(qdisc);
1204		dev->priv_flags ^= IFF_NO_QUEUE;
1205	}
1206
1207#ifdef CONFIG_NET_SCHED
1208	if (qdisc != &noop_qdisc)
1209		qdisc_hash_add(qdisc, false);
1210#endif
1211}
1212
1213static void transition_one_qdisc(struct net_device *dev,
1214				 struct netdev_queue *dev_queue,
1215				 void *_need_watchdog)
1216{
1217	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1218	int *need_watchdog_p = _need_watchdog;
1219
1220	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1221		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1222
1223	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1224	if (need_watchdog_p) {
1225		WRITE_ONCE(dev_queue->trans_start, 0);
1226		*need_watchdog_p = 1;
1227	}
1228}
1229
1230void dev_activate(struct net_device *dev)
1231{
1232	int need_watchdog;
1233
1234	/* No queueing discipline is attached to the device;
1235	 * create a default one for devices that need queueing,
1236	 * and noqueue_qdisc for virtual interfaces.
1237	 */
1238
1239	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1240		attach_default_qdiscs(dev);
1241
1242	if (!netif_carrier_ok(dev))
1243		/* Delay activation until next carrier-on event */
1244		return;
1245
1246	need_watchdog = 0;
1247	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1248	if (dev_ingress_queue(dev))
1249		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1250
1251	if (need_watchdog) {
1252		netif_trans_update(dev);
1253		dev_watchdog_up(dev);
1254	}
1255}
1256EXPORT_SYMBOL(dev_activate);
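/*
 * Sketch of where dev_activate() sits in interface bring-up (simplified;
 * the real sequence lives in net/core/dev.c):
 *
 *	__dev_open(dev)
 *		ops->ndo_open(dev)	// driver powers up the hardware
 *		dev_activate(dev)	// attach default qdiscs, arm TX watchdog
 *	...
 *	netif_carrier_on(dev)		// driver, once link is detected; until
 *					// carrier is reported, dev_activate()
 *					// returns early above and linkwatch
 *					// re-runs it later
 */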
1257
1258static void qdisc_deactivate(struct Qdisc *qdisc)
1259{
1260	if (qdisc->flags & TCQ_F_BUILTIN)
1261		return;
1262
1263	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1264}
1265
1266static void dev_deactivate_queue(struct net_device *dev,
1267				 struct netdev_queue *dev_queue,
1268				 void *_qdisc_default)
1269{
1270	struct Qdisc *qdisc_default = _qdisc_default;
1271	struct Qdisc *qdisc;
1272
1273	qdisc = rtnl_dereference(dev_queue->qdisc);
1274	if (qdisc) {
1275		qdisc_deactivate(qdisc);
1276		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1277	}
1278}
1279
1280static void dev_reset_queue(struct net_device *dev,
1281			    struct netdev_queue *dev_queue,
1282			    void *_unused)
1283{
1284	struct Qdisc *qdisc;
1285	bool nolock;
1286
1287	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1288	if (!qdisc)
1289		return;
1290
1291	nolock = qdisc->flags & TCQ_F_NOLOCK;
1292
1293	if (nolock)
1294		spin_lock_bh(&qdisc->seqlock);
1295	spin_lock_bh(qdisc_lock(qdisc));
1296
1297	qdisc_reset(qdisc);
1298
1299	spin_unlock_bh(qdisc_lock(qdisc));
1300	if (nolock) {
1301		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1302		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1303		spin_unlock_bh(&qdisc->seqlock);
1304	}
1305}
1306
1307static bool some_qdisc_is_busy(struct net_device *dev)
1308{
1309	unsigned int i;
1310
1311	for (i = 0; i < dev->num_tx_queues; i++) {
1312		struct netdev_queue *dev_queue;
1313		spinlock_t *root_lock;
1314		struct Qdisc *q;
1315		int val;
1316
1317		dev_queue = netdev_get_tx_queue(dev, i);
1318		q = rtnl_dereference(dev_queue->qdisc_sleeping);
1319
1320		root_lock = qdisc_lock(q);
1321		spin_lock_bh(root_lock);
1322
1323		val = (qdisc_is_running(q) ||
1324		       test_bit(__QDISC_STATE_SCHED, &q->state));
1325
1326		spin_unlock_bh(root_lock);
1327
1328		if (val)
1329			return true;
1330	}
1331	return false;
1332}
1333
1334/**
1335 * 	dev_deactivate_many - deactivate transmissions on several devices
1336 * 	@head: list of devices to deactivate
1337 *
1338 *	This function returns only when all outstanding transmissions
1339 *	have completed, unless all devices are in dismantle phase.
1340 */
1341void dev_deactivate_many(struct list_head *head)
1342{
1343	struct net_device *dev;
1344
1345	list_for_each_entry(dev, head, close_list) {
1346		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1347					 &noop_qdisc);
1348		if (dev_ingress_queue(dev))
1349			dev_deactivate_queue(dev, dev_ingress_queue(dev),
1350					     &noop_qdisc);
1351
1352		dev_watchdog_down(dev);
1353	}
1354
1355	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
1356	 * outstanding qdisc enqueuing calls.
1357	 * This is avoided if all devices are in the dismantle phase:
1358	 * the caller will call synchronize_net() for us.
1359	 */
1360	synchronize_net();
1361
1362	list_for_each_entry(dev, head, close_list) {
1363		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1364
1365		if (dev_ingress_queue(dev))
1366			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1367	}
1368
1369	/* Wait for outstanding qdisc_run calls. */
1370	list_for_each_entry(dev, head, close_list) {
1371		while (some_qdisc_is_busy(dev)) {
1372			/* wait_event() would avoid this sleep-loop but would
1373			 * require expensive checks in the fast paths of packet
1374			 * processing, which isn't worth it.
1375			 */
1376			schedule_timeout_uninterruptible(1);
1377		}
1378	}
1379}
1380
1381void dev_deactivate(struct net_device *dev)
1382{
1383	LIST_HEAD(single);
1384
1385	list_add(&dev->close_list, &single);
1386	dev_deactivate_many(&single);
1387	list_del(&single);
1388}
1389EXPORT_SYMBOL(dev_deactivate);
1390
1391static int qdisc_change_tx_queue_len(struct net_device *dev,
1392				     struct netdev_queue *dev_queue)
1393{
1394	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1395	const struct Qdisc_ops *ops = qdisc->ops;
1396
1397	if (ops->change_tx_queue_len)
1398		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1399	return 0;
1400}
1401
1402void dev_qdisc_change_real_num_tx(struct net_device *dev,
1403				  unsigned int new_real_tx)
1404{
1405	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1406
1407	if (qdisc->ops->change_real_num_tx)
1408		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1409}
1410
1411void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1412{
1413#ifdef CONFIG_NET_SCHED
1414	struct net_device *dev = qdisc_dev(sch);
1415	struct Qdisc *qdisc;
1416	unsigned int i;
1417
1418	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1419		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1420		/* Only update the default qdiscs we created;
1421		 * qdiscs with handles are always hashed.
1422		 */
1423		if (qdisc != &noop_qdisc && !qdisc->handle)
1424			qdisc_hash_del(qdisc);
1425	}
1426	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1427		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1428		if (qdisc != &noop_qdisc && !qdisc->handle)
1429			qdisc_hash_add(qdisc, false);
1430	}
1431#endif
1432}
1433EXPORT_SYMBOL(mq_change_real_num_tx);
1434
1435int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1436{
1437	bool up = dev->flags & IFF_UP;
1438	unsigned int i;
1439	int ret = 0;
1440
1441	if (up)
1442		dev_deactivate(dev);
1443
1444	for (i = 0; i < dev->num_tx_queues; i++) {
1445		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1446
1447		/* TODO: revert changes on a partial failure */
1448		if (ret)
1449			break;
1450	}
1451
1452	if (up)
1453		dev_activate(dev);
1454	return ret;
1455}
1456
1457static void dev_init_scheduler_queue(struct net_device *dev,
1458				     struct netdev_queue *dev_queue,
1459				     void *_qdisc)
1460{
1461	struct Qdisc *qdisc = _qdisc;
1462
1463	rcu_assign_pointer(dev_queue->qdisc, qdisc);
1464	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1465}
1466
1467void dev_init_scheduler(struct net_device *dev)
1468{
1469	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1470	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1471	if (dev_ingress_queue(dev))
1472		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1473
1474	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1475}
1476
1477void dev_shutdown(struct net_device *dev)
1478{
1479	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1480	if (dev_ingress_queue(dev))
1481		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1482	qdisc_put(rtnl_dereference(dev->qdisc));
1483	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1484
1485	WARN_ON(timer_pending(&dev->watchdog_timer));
1486}
1487
1488/**
1489 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1490 * @rate:   Rate to compute reciprocal division values of
1491 * @mult:   Multiplier for reciprocal division
1492 * @shift:  Shift for reciprocal division
1493 *
1494 * The multiplier and shift for reciprocal division by rate are stored
1495 * in mult and shift.
1496 *
1497 * The idea here is to replace a divide in the fast path with a
1498 * reciprocal divide (a multiply and a shift).
1499 *
1500 * The normal formula would be:
1501 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1502 *
1503 * We compute mult/shift to use instead:
1504 *  time_in_ns = (len * mult) >> shift;
1505 *
1506 * We try to get the highest possible mult value for accuracy,
1507 * but have to make sure no overflows will ever happen.
1508 *
1509 * reciprocal_value() is not used here because it doesn't handle 64-bit values.
1510 */
1511static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1512{
1513	u64 factor = NSEC_PER_SEC;
1514
1515	*mult = 1;
1516	*shift = 0;
1517
1518	if (rate <= 0)
1519		return;
1520
1521	for (;;) {
1522		*mult = div64_u64(factor, rate);
1523		if (*mult & (1U << 31) || factor & (1ULL << 63))
1524			break;
1525		factor <<= 1;
1526		(*shift)++;
1527	}
1528}
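/*
 * Worked example for the helper above: rate = 125,000,000 bytes/s (1 Gbit/s)
 * yields mult = 0x80000000 and shift = 28.  A 1500 byte packet then costs
 * (1500 * 0x80000000) >> 28 = 12000 ns, matching
 * NSEC_PER_SEC * 1500 / 125,000,000 exactly.
 */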
1529
1530void psched_ratecfg_precompute(struct psched_ratecfg *r,
1531			       const struct tc_ratespec *conf,
1532			       u64 rate64)
1533{
1534	memset(r, 0, sizeof(*r));
1535	r->overhead = conf->overhead;
1536	r->mpu = conf->mpu;
1537	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1538	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1539	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1540}
1541EXPORT_SYMBOL(psched_ratecfg_precompute);
1542
1543void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1544{
1545	r->rate_pkts_ps = pktrate64;
1546	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1547}
1548EXPORT_SYMBOL(psched_ppscfg_precompute);
1549
1550void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1551			  struct tcf_proto *tp_head)
1552{
1553	/* Protected with chain0->filter_chain_lock.
1554	 * Can't access chain directly because tp_head can be NULL.
1555	 */
1556	struct mini_Qdisc *miniq_old =
1557		rcu_dereference_protected(*miniqp->p_miniq, 1);
1558	struct mini_Qdisc *miniq;
1559
1560	if (!tp_head) {
1561		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1562	} else {
1563		miniq = miniq_old != &miniqp->miniq1 ?
1564			&miniqp->miniq1 : &miniqp->miniq2;
1565
1566		/* We need to make sure that readers won't see the miniq
1567		 * we are about to modify. So ensure that at least one RCU
1568		 * grace period has elapsed since the miniq was made
1569		 * inactive.
1570		 */
1571		if (IS_ENABLED(CONFIG_PREEMPT_RT))
1572			cond_synchronize_rcu(miniq->rcu_state);
1573		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1574			synchronize_rcu_expedited();
1575
1576		miniq->filter_list = tp_head;
1577		rcu_assign_pointer(*miniqp->p_miniq, miniq);
1578	}
1579
1580	if (miniq_old)
1581		/* This is the counterpart of the RCU sync above. We need to
1582		 * block any potential new user of miniq_old until all
1583		 * readers have stopped seeing it.
1584		 */
1585		miniq_old->rcu_state = start_poll_synchronize_rcu();
1586}
1587EXPORT_SYMBOL(mini_qdisc_pair_swap);
1588
1589void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1590				struct tcf_block *block)
1591{
1592	miniqp->miniq1.block = block;
1593	miniqp->miniq2.block = block;
1594}
1595EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1596
1597void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1598			  struct mini_Qdisc __rcu **p_miniq)
1599{
1600	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1601	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1602	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1603	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1604	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1605	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1606	miniqp->p_miniq = p_miniq;
1607}
1608EXPORT_SYMBOL(mini_qdisc_pair_init);
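/*
 * Illustrative sketch, not from sch_generic.c: the intended usage pattern of
 * the mini_Qdisc pair, loosely modelled on how ingress/clsact drive it.
 * example_priv, example_miniq_setup() and example_miniq_update() are
 * assumptions for the example.
 */
struct example_priv {
	struct mini_Qdisc_pair miniqp;
	struct mini_Qdisc __rcu *miniq;	/* what the RX hot path dereferences */
};

static void example_miniq_setup(struct Qdisc *sch, struct example_priv *priv)
{
	mini_qdisc_pair_init(&priv->miniqp, sch, &priv->miniq);
}

/* Called whenever the filter chain head changes (tp_head may be NULL):
 * swaps the active mini_Qdisc so readers never see a half-updated one.
 */
static void example_miniq_update(struct example_priv *priv,
				 struct tcf_proto *tp_head)
{
	mini_qdisc_pair_swap(&priv->miniqp, tp_head);
}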