v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_generic.c	Generic packet scheduler routines.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
   7 *              - Ingress support
   8 */
   9
  10#include <linux/bitops.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/string.h>
  16#include <linux/errno.h>
  17#include <linux/netdevice.h>
  18#include <linux/skbuff.h>
  19#include <linux/rtnetlink.h>
  20#include <linux/init.h>
  21#include <linux/rcupdate.h>
  22#include <linux/list.h>
  23#include <linux/slab.h>
  24#include <linux/if_vlan.h>
  25#include <linux/skb_array.h>
  26#include <linux/if_macvlan.h>
  27#include <net/sch_generic.h>
  28#include <net/pkt_sched.h>
  29#include <net/dst.h>
  30#include <trace/events/qdisc.h>
  31#include <trace/events/net.h>
  32#include <net/xfrm.h>
  33
  34/* Qdisc to use by default */
  35const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
  36EXPORT_SYMBOL(default_qdisc_ops);
  37
  38static void qdisc_maybe_clear_missed(struct Qdisc *q,
  39				     const struct netdev_queue *txq)
  40{
  41	clear_bit(__QDISC_STATE_MISSED, &q->state);
  42
  43	/* Make sure the below netif_xmit_frozen_or_stopped()
  44	 * checking happens after clearing STATE_MISSED.
  45	 */
  46	smp_mb__after_atomic();
  47
  48	/* Checking netif_xmit_frozen_or_stopped() again to
  49	 * make sure STATE_MISSED is set if the STATE_MISSED
  50	 * set by netif_tx_wake_queue()'s rescheduling of
  51	 * net_tx_action() is cleared by the above clear_bit().
  52	 */
  53	if (!netif_xmit_frozen_or_stopped(txq))
  54		set_bit(__QDISC_STATE_MISSED, &q->state);
  55	else
  56		set_bit(__QDISC_STATE_DRAINING, &q->state);
  57}
  58
  59/* Main transmission queue. */
  60
  61/* Modifications to data participating in scheduling must be protected with
  62 * qdisc_lock(qdisc) spinlock.
  63 *
  64 * The idea is the following:
  65 * - enqueue, dequeue are serialized via qdisc root lock
  66 * - ingress filtering is also serialized via qdisc root lock
  67 * - updates to tree and tree walking are only done under the rtnl mutex.
  68 */
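The locking scheme above is what the core transmit path implements for qdiscs that are not TCQ_F_NOLOCK. A minimal, hypothetical sketch of that pattern (invented helper name; the real logic lives in __dev_xmit_skb() in net/core/dev.c, which additionally handles the busylock and the lockless fast path):

static int enqueue_under_root_lock(struct sk_buff *skb, struct Qdisc *q,
				   struct sk_buff **to_free)
{
	spinlock_t *root_lock = qdisc_lock(q);	/* the per-qdisc root lock */
	int rc;

	spin_lock(root_lock);
	rc = q->enqueue(skb, q, to_free);	/* serialized against dequeue */
	spin_unlock(root_lock);

	return rc;
}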
  69
  70#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
  71
  72static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
  73{
  74	const struct netdev_queue *txq = q->dev_queue;
  75	spinlock_t *lock = NULL;
  76	struct sk_buff *skb;
  77
  78	if (q->flags & TCQ_F_NOLOCK) {
  79		lock = qdisc_lock(q);
  80		spin_lock(lock);
  81	}
  82
  83	skb = skb_peek(&q->skb_bad_txq);
  84	if (skb) {
  85		/* check the reason of requeuing without tx lock first */
  86		txq = skb_get_tx_queue(txq->dev, skb);
  87		if (!netif_xmit_frozen_or_stopped(txq)) {
  88			skb = __skb_dequeue(&q->skb_bad_txq);
  89			if (qdisc_is_percpu_stats(q)) {
  90				qdisc_qstats_cpu_backlog_dec(q, skb);
  91				qdisc_qstats_cpu_qlen_dec(q);
  92			} else {
  93				qdisc_qstats_backlog_dec(q, skb);
  94				q->q.qlen--;
  95			}
  96		} else {
  97			skb = SKB_XOFF_MAGIC;
  98			qdisc_maybe_clear_missed(q, txq);
  99		}
 100	}
 101
 102	if (lock)
 103		spin_unlock(lock);
 104
 105	return skb;
 106}
 107
 108static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
 109{
 110	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
 111
 112	if (unlikely(skb))
 113		skb = __skb_dequeue_bad_txq(q);
 114
 115	return skb;
 116}
 117
 118static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 119					     struct sk_buff *skb)
 120{
 121	spinlock_t *lock = NULL;
 122
 123	if (q->flags & TCQ_F_NOLOCK) {
 124		lock = qdisc_lock(q);
 125		spin_lock(lock);
 126	}
 127
 128	__skb_queue_tail(&q->skb_bad_txq, skb);
 129
 130	if (qdisc_is_percpu_stats(q)) {
 131		qdisc_qstats_cpu_backlog_inc(q, skb);
 132		qdisc_qstats_cpu_qlen_inc(q);
 133	} else {
 134		qdisc_qstats_backlog_inc(q, skb);
 135		q->q.qlen++;
 136	}
 137
 138	if (lock)
 139		spin_unlock(lock);
 140}
 141
 142static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 143{
 144	spinlock_t *lock = NULL;
 145
 146	if (q->flags & TCQ_F_NOLOCK) {
 147		lock = qdisc_lock(q);
 148		spin_lock(lock);
 149	}
 150
 151	while (skb) {
 152		struct sk_buff *next = skb->next;
 153
 154		__skb_queue_tail(&q->gso_skb, skb);
 155
 156		/* it's still part of the queue */
 157		if (qdisc_is_percpu_stats(q)) {
 158			qdisc_qstats_cpu_requeues_inc(q);
 159			qdisc_qstats_cpu_backlog_inc(q, skb);
 160			qdisc_qstats_cpu_qlen_inc(q);
 161		} else {
 162			q->qstats.requeues++;
 163			qdisc_qstats_backlog_inc(q, skb);
 164			q->q.qlen++;
 165		}
 166
 167		skb = next;
 168	}
 169
 170	if (lock) {
 171		spin_unlock(lock);
 172		set_bit(__QDISC_STATE_MISSED, &q->state);
 173	} else {
 174		__netif_schedule(q);
 175	}
 176}
 177
 178static void try_bulk_dequeue_skb(struct Qdisc *q,
 179				 struct sk_buff *skb,
 180				 const struct netdev_queue *txq,
 181				 int *packets)
 182{
 183	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 184
 185	while (bytelimit > 0) {
 186		struct sk_buff *nskb = q->dequeue(q);
 187
 188		if (!nskb)
 189			break;
 190
 191		bytelimit -= nskb->len; /* covers GSO len */
 192		skb->next = nskb;
 193		skb = nskb;
 194		(*packets)++; /* GSO counts as one pkt */
 195	}
 196	skb_mark_not_on_list(skb);
 197}
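qdisc_avail_bulklimit() above is backed by Byte Queue Limits, so bulk dequeue is only as accurate as the driver's BQL bookkeeping. A hypothetical driver-side sketch of that bookkeeping (foo_* names invented):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);	/* bytes now in flight */

	/* hand the skb to hardware here */
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev, unsigned int queue,
			    unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* shrinks the in-flight count that qdisc_avail_bulklimit() reads */
	netdev_tx_completed_queue(txq, pkts, bytes);
}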
 198
 199/* This variant of try_bulk_dequeue_skb() makes sure
 200 * all skbs in the chain are for the same txq
 201 */
 202static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 203				      struct sk_buff *skb,
 204				      int *packets)
 205{
 206	int mapping = skb_get_queue_mapping(skb);
 207	struct sk_buff *nskb;
 208	int cnt = 0;
 209
 210	do {
 211		nskb = q->dequeue(q);
 212		if (!nskb)
 213			break;
 214		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 215			qdisc_enqueue_skb_bad_txq(q, nskb);
 216			break;
 217		}
 218		skb->next = nskb;
 219		skb = nskb;
 220	} while (++cnt < 8);
 221	(*packets) += cnt;
 222	skb_mark_not_on_list(skb);
 223}
 224
 225/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 226 * A requeued skb (via q->gso_skb) can also be a SKB list.
 227 */
 228static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 229				   int *packets)
 230{
 231	const struct netdev_queue *txq = q->dev_queue;
 232	struct sk_buff *skb = NULL;
 233
 234	*packets = 1;
 235	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
 236		spinlock_t *lock = NULL;
 237
 238		if (q->flags & TCQ_F_NOLOCK) {
 239			lock = qdisc_lock(q);
 240			spin_lock(lock);
 241		}
 242
 243		skb = skb_peek(&q->gso_skb);
 244
 245		/* skb may be null if another cpu pulls gso_skb off in between
 246		 * empty check and lock.
 247		 */
 248		if (!skb) {
 249			if (lock)
 250				spin_unlock(lock);
 251			goto validate;
 252		}
 253
 254		/* skb in gso_skb were already validated */
 255		*validate = false;
 256		if (xfrm_offload(skb))
 257			*validate = true;
 258		/* check the reason of requeuing without tx lock first */
 259		txq = skb_get_tx_queue(txq->dev, skb);
 260		if (!netif_xmit_frozen_or_stopped(txq)) {
 261			skb = __skb_dequeue(&q->gso_skb);
 262			if (qdisc_is_percpu_stats(q)) {
 263				qdisc_qstats_cpu_backlog_dec(q, skb);
 264				qdisc_qstats_cpu_qlen_dec(q);
 265			} else {
 266				qdisc_qstats_backlog_dec(q, skb);
 267				q->q.qlen--;
 268			}
 269		} else {
 270			skb = NULL;
 271			qdisc_maybe_clear_missed(q, txq);
 272		}
 273		if (lock)
 274			spin_unlock(lock);
 275		goto trace;
 276	}
 277validate:
 278	*validate = true;
 279
 280	if ((q->flags & TCQ_F_ONETXQUEUE) &&
 281	    netif_xmit_frozen_or_stopped(txq)) {
 282		qdisc_maybe_clear_missed(q, txq);
 283		return skb;
 284	}
 285
 286	skb = qdisc_dequeue_skb_bad_txq(q);
 287	if (unlikely(skb)) {
 288		if (skb == SKB_XOFF_MAGIC)
 289			return NULL;
 290		goto bulk;
 291	}
 292	skb = q->dequeue(q);
 293	if (skb) {
 294bulk:
 295		if (qdisc_may_bulk(q))
 296			try_bulk_dequeue_skb(q, skb, txq, packets);
 297		else
 298			try_bulk_dequeue_skb_slow(q, skb, packets);
 299	}
 300trace:
 301	trace_qdisc_dequeue(q, txq, *packets, skb);
 302	return skb;
 303}
 304
 305/*
 306 * Transmit possibly several skbs, and handle the return status as
 307 * required. Owning qdisc running bit guarantees that only one CPU
 308 * can execute this function.
 309 *
 310 * Returns to the caller:
 311 *				false  - hardware queue frozen backoff
 312 *				true   - feel free to send more pkts
 313 */
 314bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 315		     struct net_device *dev, struct netdev_queue *txq,
 316		     spinlock_t *root_lock, bool validate)
 317{
 318	int ret = NETDEV_TX_BUSY;
 319	bool again = false;
 320
 321	/* And release qdisc */
 322	if (root_lock)
 323		spin_unlock(root_lock);
 324
 325	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 326	if (validate)
 327		skb = validate_xmit_skb_list(skb, dev, &again);
 328
 329#ifdef CONFIG_XFRM_OFFLOAD
 330	if (unlikely(again)) {
 331		if (root_lock)
 332			spin_lock(root_lock);
 333
 334		dev_requeue_skb(skb, q);
 335		return false;
 336	}
 337#endif
 338
 339	if (likely(skb)) {
 340		HARD_TX_LOCK(dev, txq, smp_processor_id());
 341		if (!netif_xmit_frozen_or_stopped(txq))
 342			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 343		else
 344			qdisc_maybe_clear_missed(q, txq);
 345
 346		HARD_TX_UNLOCK(dev, txq);
 347	} else {
 348		if (root_lock)
 349			spin_lock(root_lock);
 350		return true;
 351	}
 352
 353	if (root_lock)
 354		spin_lock(root_lock);
 355
 356	if (!dev_xmit_complete(ret)) {
 357		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 358		if (unlikely(ret != NETDEV_TX_BUSY))
 359			net_warn_ratelimited("BUG %s code %d qlen %d\n",
 360					     dev->name, ret, q->q.qlen);
 361
 362		dev_requeue_skb(skb, q);
 363		return false;
 364	}
 365
 366	return true;
 367}
 368
 369/*
 370 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 371 *
 372 * running seqcount guarantees only one CPU can process
 373 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 374 * this queue.
 375 *
 376 *  netif_tx_lock serializes accesses to device driver.
 377 *
 378 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 379 *  if one is grabbed, another must be free.
 380 *
 381 * Note, that this procedure can be called by a watchdog timer
 382 *
 383 * Returns to the caller:
  384 *				false - queue empty, or device busy and skb requeued.
  385 *				true  - packet sent, caller may keep dequeuing.
 386 *
 387 */
 388static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 389{
 390	spinlock_t *root_lock = NULL;
 391	struct netdev_queue *txq;
 392	struct net_device *dev;
 393	struct sk_buff *skb;
 394	bool validate;
 395
 396	/* Dequeue packet */
 397	skb = dequeue_skb(q, &validate, packets);
 398	if (unlikely(!skb))
 399		return false;
 400
 401	if (!(q->flags & TCQ_F_NOLOCK))
 402		root_lock = qdisc_lock(q);
 403
 404	dev = qdisc_dev(q);
 405	txq = skb_get_tx_queue(dev, skb);
 406
 407	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
 408}
 409
 410void __qdisc_run(struct Qdisc *q)
 411{
 412	int quota = READ_ONCE(dev_tx_weight);
 413	int packets;
 414
 415	while (qdisc_restart(q, &packets)) {
 416		quota -= packets;
 417		if (quota <= 0) {
 418			if (q->flags & TCQ_F_NOLOCK)
 419				set_bit(__QDISC_STATE_MISSED, &q->state);
 420			else
 421				__netif_schedule(q);
 422
 423			break;
 424		}
 425	}
 426}
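__qdisc_run() is normally entered through the qdisc_run() wrapper, which first takes the per-qdisc running protection; roughly (see include/net/pkt_sched.h for the real helper):

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {	/* only one CPU may run this qdisc */
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}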
 427
 428unsigned long dev_trans_start(struct net_device *dev)
 429{
 430	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
 431	unsigned long val;
 432	unsigned int i;
 433
 434	for (i = 1; i < dev->num_tx_queues; i++) {
 435		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
 436		if (val && time_after(val, res))
 437			res = val;
 438	}
 439
 440	return res;
 441}
 442EXPORT_SYMBOL(dev_trans_start);
 443
 444static void netif_freeze_queues(struct net_device *dev)
 445{
 446	unsigned int i;
 447	int cpu;
 448
 449	cpu = smp_processor_id();
 450	for (i = 0; i < dev->num_tx_queues; i++) {
 451		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 452
 453		/* We are the only thread of execution doing a
 454		 * freeze, but we have to grab the _xmit_lock in
 455		 * order to synchronize with threads which are in
 456		 * the ->hard_start_xmit() handler and already
 457		 * checked the frozen bit.
 458		 */
 459		__netif_tx_lock(txq, cpu);
 460		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 461		__netif_tx_unlock(txq);
 462	}
 463}
 464
 465void netif_tx_lock(struct net_device *dev)
 466{
 467	spin_lock(&dev->tx_global_lock);
 468	netif_freeze_queues(dev);
 469}
 470EXPORT_SYMBOL(netif_tx_lock);
 471
 472static void netif_unfreeze_queues(struct net_device *dev)
 473{
 474	unsigned int i;
 475
 476	for (i = 0; i < dev->num_tx_queues; i++) {
 477		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 478
 479		/* No need to grab the _xmit_lock here.  If the
 480		 * queue is not stopped for another reason, we
 481		 * force a schedule.
 482		 */
 483		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 484		netif_schedule_queue(txq);
 485	}
 486}
 487
 488void netif_tx_unlock(struct net_device *dev)
 489{
 490	netif_unfreeze_queues(dev);
 491	spin_unlock(&dev->tx_global_lock);
 492}
 493EXPORT_SYMBOL(netif_tx_unlock);
 494
 495static void dev_watchdog(struct timer_list *t)
 496{
 497	struct net_device *dev = from_timer(dev, t, watchdog_timer);
 498	bool release = true;
 499
 500	spin_lock(&dev->tx_global_lock);
 501	if (!qdisc_tx_is_noop(dev)) {
 502		if (netif_device_present(dev) &&
 503		    netif_running(dev) &&
 504		    netif_carrier_ok(dev)) {
 505			unsigned int timedout_ms = 0;
 506			unsigned int i;
 507			unsigned long trans_start;
 508
 509			for (i = 0; i < dev->num_tx_queues; i++) {
 510				struct netdev_queue *txq;
 511
 512				txq = netdev_get_tx_queue(dev, i);
 513				trans_start = READ_ONCE(txq->trans_start);
 514				if (netif_xmit_stopped(txq) &&
 515				    time_after(jiffies, (trans_start +
 516							 dev->watchdog_timeo))) {
 517					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
 518					atomic_long_inc(&txq->trans_timeout);
 519					break;
 520				}
 521			}
 522
 523			if (unlikely(timedout_ms)) {
 524				trace_net_dev_xmit_timeout(dev, i);
 525				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
 526					    raw_smp_processor_id(),
 527					    i, timedout_ms);
 528				netif_freeze_queues(dev);
 529				dev->netdev_ops->ndo_tx_timeout(dev, i);
 530				netif_unfreeze_queues(dev);
 531			}
 532			if (!mod_timer(&dev->watchdog_timer,
 533				       round_jiffies(jiffies +
 534						     dev->watchdog_timeo)))
 535				release = false;
 536		}
 537	}
 538	spin_unlock(&dev->tx_global_lock);
 539
 540	if (release)
 541		netdev_put(dev, &dev->watchdog_dev_tracker);
 542}
 543
 544void __netdev_watchdog_up(struct net_device *dev)
 545{
 546	if (dev->netdev_ops->ndo_tx_timeout) {
 547		if (dev->watchdog_timeo <= 0)
 548			dev->watchdog_timeo = 5*HZ;
 549		if (!mod_timer(&dev->watchdog_timer,
 550			       round_jiffies(jiffies + dev->watchdog_timeo)))
 551			netdev_hold(dev, &dev->watchdog_dev_tracker,
 552				    GFP_ATOMIC);
 553	}
 554}
 555EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
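The watchdog above only fires for drivers that supply an ndo_tx_timeout handler (and, optionally, their own watchdog_timeo). A hypothetical driver hookup (foo_* names invented) might look like:

static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* called from dev_watchdog() with the TX queues frozen */
	netdev_warn(dev, "TX queue %u stalled, resetting\n", txqueue);
	/* driver-specific ring reset / requeue would go here */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_tx_timeout	= foo_tx_timeout,
	/* dev->watchdog_timeo defaults to 5*HZ (see above) when left at 0 */
};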
 556
 557static void dev_watchdog_up(struct net_device *dev)
 558{
 559	__netdev_watchdog_up(dev);
 560}
 561
 562static void dev_watchdog_down(struct net_device *dev)
 563{
 564	netif_tx_lock_bh(dev);
 565	if (del_timer(&dev->watchdog_timer))
 566		netdev_put(dev, &dev->watchdog_dev_tracker);
 567	netif_tx_unlock_bh(dev);
 568}
 569
 570/**
 571 *	netif_carrier_on - set carrier
 572 *	@dev: network device
 573 *
 574 * Device has detected acquisition of carrier.
 575 */
 576void netif_carrier_on(struct net_device *dev)
 577{
 578	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 579		if (dev->reg_state == NETREG_UNINITIALIZED)
 580			return;
 581		atomic_inc(&dev->carrier_up_count);
 582		linkwatch_fire_event(dev);
 583		if (netif_running(dev))
 584			__netdev_watchdog_up(dev);
 585	}
 586}
 587EXPORT_SYMBOL(netif_carrier_on);
 588
 589/**
 590 *	netif_carrier_off - clear carrier
 591 *	@dev: network device
 592 *
 593 * Device has detected loss of carrier.
 594 */
 595void netif_carrier_off(struct net_device *dev)
 596{
 597	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 598		if (dev->reg_state == NETREG_UNINITIALIZED)
 599			return;
 600		atomic_inc(&dev->carrier_down_count);
 601		linkwatch_fire_event(dev);
 602	}
 603}
 604EXPORT_SYMBOL(netif_carrier_off);
 605
 606/**
 607 *	netif_carrier_event - report carrier state event
 608 *	@dev: network device
 609 *
 610 * Device has detected a carrier event but the carrier state wasn't changed.
 611 * Use in drivers when querying carrier state asynchronously, to avoid missing
 612 * events (link flaps) if link recovers before it's queried.
 613 */
 614void netif_carrier_event(struct net_device *dev)
 615{
 616	if (dev->reg_state == NETREG_UNINITIALIZED)
 617		return;
 618	atomic_inc(&dev->carrier_up_count);
 619	atomic_inc(&dev->carrier_down_count);
 620	linkwatch_fire_event(dev);
 621}
 622EXPORT_SYMBOL_GPL(netif_carrier_event);
 623
 624/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 625   under all circumstances. It is difficult to invent anything faster or
 626   cheaper.
 627 */
 628
 629static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 630			struct sk_buff **to_free)
  631{
 632	__qdisc_drop(skb, to_free);
 633	return NET_XMIT_CN;
 634}
 635
 636static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
 637{
 638	return NULL;
 639}
 640
 641struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 642	.id		=	"noop",
 643	.priv_size	=	0,
 644	.enqueue	=	noop_enqueue,
 645	.dequeue	=	noop_dequeue,
 646	.peek		=	noop_dequeue,
 647	.owner		=	THIS_MODULE,
 648};
 649
 650static struct netdev_queue noop_netdev_queue = {
 651	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 652	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
 653};
 654
 655struct Qdisc noop_qdisc = {
 656	.enqueue	=	noop_enqueue,
 657	.dequeue	=	noop_dequeue,
 658	.flags		=	TCQ_F_BUILTIN,
 659	.ops		=	&noop_qdisc_ops,
 660	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 661	.dev_queue	=	&noop_netdev_queue,
 662	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 663	.gso_skb = {
 664		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
 665		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
 666		.qlen = 0,
 667		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
 668	},
 669	.skb_bad_txq = {
 670		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 671		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 672		.qlen = 0,
 673		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 674	},
 675};
 676EXPORT_SYMBOL(noop_qdisc);
 677
 678static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
 679			struct netlink_ext_ack *extack)
 680{
 681	/* register_qdisc() assigns a default of noop_enqueue if unset,
 682	 * but __dev_queue_xmit() treats noqueue only as such
 683	 * if this is NULL - so clear it here. */
 684	qdisc->enqueue = NULL;
 685	return 0;
 686}
 687
 688struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 689	.id		=	"noqueue",
 690	.priv_size	=	0,
 691	.init		=	noqueue_init,
 692	.enqueue	=	noop_enqueue,
 693	.dequeue	=	noop_dequeue,
 694	.peek		=	noop_dequeue,
 695	.owner		=	THIS_MODULE,
 696};
 697
 698const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
 699	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
 700};
 701EXPORT_SYMBOL(sch_default_prio2band);
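With the TC_PRIO_* constants from include/uapi/linux/pkt_sched.h, the table above reads as follows (band 0 is always emptied before band 1, which is emptied before band 2):

/* Examples of the default priority-to-band mapping:
 *   sch_default_prio2band[TC_PRIO_BESTEFFORT]  == 1   (normal band)
 *   sch_default_prio2band[TC_PRIO_BULK]        == 2   (lowest band)
 *   sch_default_prio2band[TC_PRIO_INTERACTIVE] == 0   (highest band)
 *   sch_default_prio2band[TC_PRIO_CONTROL]     == 0   (highest band)
 */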
 702
 703/* 3-band FIFO queue: old style, but should be a bit faster than
 704   generic prio+fifo combination.
 705 */
 706
 707#define PFIFO_FAST_BANDS 3
 708
 709/*
 710 * Private data for a pfifo_fast scheduler containing:
 711 *	- rings for priority bands
 712 */
 713struct pfifo_fast_priv {
 714	struct skb_array q[PFIFO_FAST_BANDS];
 715};
 716
 717static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
 718					  int band)
 719{
 720	return &priv->q[band];
 721}
 722
 723static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 724			      struct sk_buff **to_free)
 725{
 726	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
 727	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 728	struct skb_array *q = band2list(priv, band);
 729	unsigned int pkt_len = qdisc_pkt_len(skb);
 730	int err;
 731
 732	err = skb_array_produce(q, skb);
 733
 734	if (unlikely(err)) {
 735		if (qdisc_is_percpu_stats(qdisc))
 736			return qdisc_drop_cpu(skb, qdisc, to_free);
 737		else
 738			return qdisc_drop(skb, qdisc, to_free);
 739	}
 740
 741	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 742	return NET_XMIT_SUCCESS;
 743}
 744
 745static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 746{
 747	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 748	struct sk_buff *skb = NULL;
 749	bool need_retry = true;
 750	int band;
 751
 752retry:
 753	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 754		struct skb_array *q = band2list(priv, band);
 755
 756		if (__skb_array_empty(q))
 757			continue;
 758
 759		skb = __skb_array_consume(q);
 760	}
 761	if (likely(skb)) {
 762		qdisc_update_stats_at_dequeue(qdisc, skb);
 763	} else if (need_retry &&
 764		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
 765		/* Delay clearing the STATE_MISSED here to reduce
 766		 * the overhead of the second spin_trylock() in
 767		 * qdisc_run_begin() and __netif_schedule() calling
 768		 * in qdisc_run_end().
 769		 */
 770		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
 771		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
 772
 773		/* Make sure dequeuing happens after clearing
 774		 * STATE_MISSED.
 775		 */
 776		smp_mb__after_atomic();
 777
 778		need_retry = false;
 779
 780		goto retry;
 781	}
 782
 783	return skb;
 784}
 785
 786static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 787{
 788	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 789	struct sk_buff *skb = NULL;
 790	int band;
 791
 792	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 793		struct skb_array *q = band2list(priv, band);
 794
 795		skb = __skb_array_peek(q);
 796	}
 797
 798	return skb;
 799}
 800
 801static void pfifo_fast_reset(struct Qdisc *qdisc)
 802{
 803	int i, band;
 804	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 805
 806	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
 807		struct skb_array *q = band2list(priv, band);
 808		struct sk_buff *skb;
 809
 810		/* NULL ring is possible if destroy path is due to a failed
 811		 * skb_array_init() in pfifo_fast_init() case.
 812		 */
 813		if (!q->ring.queue)
 814			continue;
 815
 816		while ((skb = __skb_array_consume(q)) != NULL)
 817			kfree_skb(skb);
 818	}
 819
 820	if (qdisc_is_percpu_stats(qdisc)) {
 821		for_each_possible_cpu(i) {
 822			struct gnet_stats_queue *q;
 823
 824			q = per_cpu_ptr(qdisc->cpu_qstats, i);
 825			q->backlog = 0;
 826			q->qlen = 0;
 827		}
 828	}
 829}
 830
 831static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 832{
 833	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 834
 835	memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
 836	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 837		goto nla_put_failure;
 838	return skb->len;
 839
 840nla_put_failure:
 841	return -1;
 842}
 843
 844static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
 845			   struct netlink_ext_ack *extack)
 846{
 847	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 848	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 849	int prio;
 850
 851	/* guard against zero length rings */
 852	if (!qlen)
 853		return -EINVAL;
 854
 855	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 856		struct skb_array *q = band2list(priv, prio);
 857		int err;
 858
 859		err = skb_array_init(q, qlen, GFP_KERNEL);
 860		if (err)
 861			return -ENOMEM;
 862	}
 863
 864	/* Can by-pass the queue discipline */
 865	qdisc->flags |= TCQ_F_CAN_BYPASS;
 866	return 0;
 867}
 868
 869static void pfifo_fast_destroy(struct Qdisc *sch)
 870{
 871	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 872	int prio;
 873
 874	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 875		struct skb_array *q = band2list(priv, prio);
 876
 877		/* NULL ring is possible if destroy path is due to a failed
 878		 * skb_array_init() in pfifo_fast_init() case.
 879		 */
 880		if (!q->ring.queue)
 881			continue;
 882		/* Destroy ring but no need to kfree_skb because a call to
 883		 * pfifo_fast_reset() has already done that work.
 884		 */
 885		ptr_ring_cleanup(&q->ring, NULL);
 886	}
 887}
 888
 889static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
 890					  unsigned int new_len)
 891{
 892	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 893	struct skb_array *bands[PFIFO_FAST_BANDS];
 894	int prio;
 895
 896	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 897		struct skb_array *q = band2list(priv, prio);
 898
 899		bands[prio] = q;
 900	}
 901
 902	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
 903					 GFP_KERNEL);
 904}
 905
 906struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 907	.id		=	"pfifo_fast",
 908	.priv_size	=	sizeof(struct pfifo_fast_priv),
 909	.enqueue	=	pfifo_fast_enqueue,
 910	.dequeue	=	pfifo_fast_dequeue,
 911	.peek		=	pfifo_fast_peek,
 912	.init		=	pfifo_fast_init,
 913	.destroy	=	pfifo_fast_destroy,
 914	.reset		=	pfifo_fast_reset,
 915	.dump		=	pfifo_fast_dump,
 916	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
 917	.owner		=	THIS_MODULE,
 918	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 919};
 920EXPORT_SYMBOL(pfifo_fast_ops);
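pfifo_fast itself is registered from pktsched_init() in net/sched/sch_api.c, but a Qdisc_ops table like the one above is normally made visible to "tc" through register_qdisc()/unregister_qdisc(). A hypothetical module sketch (foo_* names invented), relying on register_qdisc() filling in noop handlers for the callbacks left NULL (the noqueue comment above notes this for enqueue):

static struct Qdisc_ops foo_qdisc_ops __read_mostly = {
	.id		= "foo",
	.priv_size	= 0,
	/* enqueue/dequeue/peek left NULL on purpose for this sketch */
	.owner		= THIS_MODULE,
};

static int __init foo_module_init(void)
{
	return register_qdisc(&foo_qdisc_ops);
}

static void __exit foo_module_exit(void)
{
	unregister_qdisc(&foo_qdisc_ops);
}

module_init(foo_module_init);
module_exit(foo_module_exit);
MODULE_LICENSE("GPL");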
 921
 922static struct lock_class_key qdisc_tx_busylock;
 923
 924struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 925			  const struct Qdisc_ops *ops,
 926			  struct netlink_ext_ack *extack)
 927{
 928	struct Qdisc *sch;
 929	unsigned int size = sizeof(*sch) + ops->priv_size;
 930	int err = -ENOBUFS;
 931	struct net_device *dev;
 932
 933	if (!dev_queue) {
 934		NL_SET_ERR_MSG(extack, "No device queue given");
 935		err = -EINVAL;
 936		goto errout;
 937	}
 938
 939	dev = dev_queue->dev;
 940	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
 941
 942	if (!sch)
 943		goto errout;
 944	__skb_queue_head_init(&sch->gso_skb);
 945	__skb_queue_head_init(&sch->skb_bad_txq);
 946	gnet_stats_basic_sync_init(&sch->bstats);
 947	spin_lock_init(&sch->q.lock);
 948
 949	if (ops->static_flags & TCQ_F_CPUSTATS) {
 950		sch->cpu_bstats =
 951			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 952		if (!sch->cpu_bstats)
 953			goto errout1;
 954
 955		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 956		if (!sch->cpu_qstats) {
 957			free_percpu(sch->cpu_bstats);
 958			goto errout1;
 959		}
 960	}
 961
 962	spin_lock_init(&sch->busylock);
 963	lockdep_set_class(&sch->busylock,
 964			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 965
 966	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 967	spin_lock_init(&sch->seqlock);
 968	lockdep_set_class(&sch->seqlock,
 969			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 970
 971	sch->ops = ops;
 972	sch->flags = ops->static_flags;
 973	sch->enqueue = ops->enqueue;
 974	sch->dequeue = ops->dequeue;
 975	sch->dev_queue = dev_queue;
 976	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
 977	refcount_set(&sch->refcnt, 1);
 978
 979	return sch;
 980errout1:
 981	kfree(sch);
 982errout:
 983	return ERR_PTR(err);
 984}
 985
 986struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 987				const struct Qdisc_ops *ops,
 988				unsigned int parentid,
 989				struct netlink_ext_ack *extack)
 990{
 991	struct Qdisc *sch;
 992
 993	if (!try_module_get(ops->owner)) {
 994		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
 995		return NULL;
 996	}
 997
 998	sch = qdisc_alloc(dev_queue, ops, extack);
 999	if (IS_ERR(sch)) {
1000		module_put(ops->owner);
1001		return NULL;
1002	}
1003	sch->parent = parentid;
1004
1005	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1006		trace_qdisc_create(ops, dev_queue->dev, parentid);
1007		return sch;
1008	}
1009
1010	qdisc_put(sch);
1011	return NULL;
1012}
1013EXPORT_SYMBOL(qdisc_create_dflt);
1014
1015/* Under qdisc_lock(qdisc) and BH! */
1016
1017void qdisc_reset(struct Qdisc *qdisc)
1018{
1019	const struct Qdisc_ops *ops = qdisc->ops;
1020
1021	trace_qdisc_reset(qdisc);
1022
1023	if (ops->reset)
1024		ops->reset(qdisc);
1025
1026	__skb_queue_purge(&qdisc->gso_skb);
1027	__skb_queue_purge(&qdisc->skb_bad_txq);
1028
1029	qdisc->q.qlen = 0;
1030	qdisc->qstats.backlog = 0;
1031}
1032EXPORT_SYMBOL(qdisc_reset);
1033
1034void qdisc_free(struct Qdisc *qdisc)
1035{
1036	if (qdisc_is_percpu_stats(qdisc)) {
1037		free_percpu(qdisc->cpu_bstats);
1038		free_percpu(qdisc->cpu_qstats);
1039	}
1040
1041	kfree(qdisc);
1042}
1043
1044static void qdisc_free_cb(struct rcu_head *head)
1045{
1046	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1047
1048	qdisc_free(q);
1049}
1050
1051static void __qdisc_destroy(struct Qdisc *qdisc)
1052{
1053	const struct Qdisc_ops  *ops = qdisc->ops;
1054	struct net_device *dev = qdisc_dev(qdisc);
1055
1056#ifdef CONFIG_NET_SCHED
1057	qdisc_hash_del(qdisc);
1058
1059	qdisc_put_stab(rtnl_dereference(qdisc->stab));
1060#endif
1061	gen_kill_estimator(&qdisc->rate_est);
1062
1063	qdisc_reset(qdisc);
1064
1065
1066	if (ops->destroy)
1067		ops->destroy(qdisc);
1068
1069	module_put(ops->owner);
1070	netdev_put(dev, &qdisc->dev_tracker);
1071
1072	trace_qdisc_destroy(qdisc);
1073
1074	call_rcu(&qdisc->rcu, qdisc_free_cb);
1075}
1076
1077void qdisc_destroy(struct Qdisc *qdisc)
1078{
1079	if (qdisc->flags & TCQ_F_BUILTIN)
1080		return;
1081
1082	__qdisc_destroy(qdisc);
1083}
1084
1085void qdisc_put(struct Qdisc *qdisc)
1086{
1087	if (!qdisc)
1088		return;
1089
1090	if (qdisc->flags & TCQ_F_BUILTIN ||
1091	    !refcount_dec_and_test(&qdisc->refcnt))
1092		return;
1093
1094	__qdisc_destroy(qdisc);
1095}
1096EXPORT_SYMBOL(qdisc_put);
1097
1098/* Version of qdisc_put() that is called with rtnl mutex unlocked.
1099 * Intended to be used as optimization, this function only takes rtnl lock if
1100 * qdisc reference counter reached zero.
1101 */
1102
1103void qdisc_put_unlocked(struct Qdisc *qdisc)
1104{
1105	if (qdisc->flags & TCQ_F_BUILTIN ||
1106	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1107		return;
1108
1109	__qdisc_destroy(qdisc);
1110	rtnl_unlock();
1111}
1112EXPORT_SYMBOL(qdisc_put_unlocked);
1113
1114/* Attach toplevel qdisc to device queue. */
1115struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1116			      struct Qdisc *qdisc)
1117{
1118	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1119	spinlock_t *root_lock;
1120
1121	root_lock = qdisc_lock(oqdisc);
1122	spin_lock_bh(root_lock);
1123
1124	/* ... and graft new one */
1125	if (qdisc == NULL)
1126		qdisc = &noop_qdisc;
1127	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1128	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1129
1130	spin_unlock_bh(root_lock);
1131
1132	return oqdisc;
1133}
1134EXPORT_SYMBOL(dev_graft_qdisc);
1135
1136static void shutdown_scheduler_queue(struct net_device *dev,
1137				     struct netdev_queue *dev_queue,
1138				     void *_qdisc_default)
1139{
1140	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1141	struct Qdisc *qdisc_default = _qdisc_default;
1142
1143	if (qdisc) {
1144		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1145		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
1146
1147		qdisc_put(qdisc);
1148	}
1149}
1150
1151static void attach_one_default_qdisc(struct net_device *dev,
1152				     struct netdev_queue *dev_queue,
1153				     void *_unused)
1154{
1155	struct Qdisc *qdisc;
1156	const struct Qdisc_ops *ops = default_qdisc_ops;
1157
1158	if (dev->priv_flags & IFF_NO_QUEUE)
1159		ops = &noqueue_qdisc_ops;
 1160	else if (dev->type == ARPHRD_CAN)
1161		ops = &pfifo_fast_ops;
1162
1163	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1164	if (!qdisc)
1165		return;
1166
1167	if (!netif_is_multiqueue(dev))
1168		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1169	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1170}
1171
1172static void attach_default_qdiscs(struct net_device *dev)
1173{
1174	struct netdev_queue *txq;
1175	struct Qdisc *qdisc;
1176
1177	txq = netdev_get_tx_queue(dev, 0);
1178
1179	if (!netif_is_multiqueue(dev) ||
1180	    dev->priv_flags & IFF_NO_QUEUE) {
1181		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1182		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1183		rcu_assign_pointer(dev->qdisc, qdisc);
1184		qdisc_refcount_inc(qdisc);
1185	} else {
1186		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1187		if (qdisc) {
1188			rcu_assign_pointer(dev->qdisc, qdisc);
1189			qdisc->ops->attach(qdisc);
1190		}
1191	}
1192	qdisc = rtnl_dereference(dev->qdisc);
1193
1194	/* Detect default qdisc setup/init failed and fallback to "noqueue" */
1195	if (qdisc == &noop_qdisc) {
1196		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1197			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
1198		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1199		dev->priv_flags |= IFF_NO_QUEUE;
1200		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1201		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1202		rcu_assign_pointer(dev->qdisc, qdisc);
1203		qdisc_refcount_inc(qdisc);
1204		dev->priv_flags ^= IFF_NO_QUEUE;
1205	}
1206
1207#ifdef CONFIG_NET_SCHED
1208	if (qdisc != &noop_qdisc)
1209		qdisc_hash_add(qdisc, false);
1210#endif
1211}
1212
1213static void transition_one_qdisc(struct net_device *dev,
1214				 struct netdev_queue *dev_queue,
1215				 void *_need_watchdog)
1216{
1217	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1218	int *need_watchdog_p = _need_watchdog;
1219
1220	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1221		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1222
1223	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1224	if (need_watchdog_p) {
1225		WRITE_ONCE(dev_queue->trans_start, 0);
1226		*need_watchdog_p = 1;
1227	}
1228}
1229
1230void dev_activate(struct net_device *dev)
1231{
1232	int need_watchdog;
1233
1234	/* No queueing discipline is attached to device;
1235	 * create default one for devices, which need queueing
1236	 * and noqueue_qdisc for virtual interfaces
1237	 */
1238
1239	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1240		attach_default_qdiscs(dev);
1241
1242	if (!netif_carrier_ok(dev))
1243		/* Delay activation until next carrier-on event */
1244		return;
1245
1246	need_watchdog = 0;
1247	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1248	if (dev_ingress_queue(dev))
1249		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1250
1251	if (need_watchdog) {
1252		netif_trans_update(dev);
1253		dev_watchdog_up(dev);
1254	}
1255}
1256EXPORT_SYMBOL(dev_activate);
1257
1258static void qdisc_deactivate(struct Qdisc *qdisc)
1259{
1260	if (qdisc->flags & TCQ_F_BUILTIN)
1261		return;
1262
1263	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1264}
1265
1266static void dev_deactivate_queue(struct net_device *dev,
1267				 struct netdev_queue *dev_queue,
1268				 void *_qdisc_default)
1269{
1270	struct Qdisc *qdisc_default = _qdisc_default;
1271	struct Qdisc *qdisc;
1272
1273	qdisc = rtnl_dereference(dev_queue->qdisc);
1274	if (qdisc) {
1275		qdisc_deactivate(qdisc);
1276		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1277	}
1278}
1279
1280static void dev_reset_queue(struct net_device *dev,
1281			    struct netdev_queue *dev_queue,
1282			    void *_unused)
1283{
1284	struct Qdisc *qdisc;
1285	bool nolock;
1286
1287	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1288	if (!qdisc)
1289		return;
1290
1291	nolock = qdisc->flags & TCQ_F_NOLOCK;
1292
1293	if (nolock)
1294		spin_lock_bh(&qdisc->seqlock);
1295	spin_lock_bh(qdisc_lock(qdisc));
1296
1297	qdisc_reset(qdisc);
1298
1299	spin_unlock_bh(qdisc_lock(qdisc));
1300	if (nolock) {
1301		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1302		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1303		spin_unlock_bh(&qdisc->seqlock);
1304	}
1305}
1306
1307static bool some_qdisc_is_busy(struct net_device *dev)
1308{
1309	unsigned int i;
1310
1311	for (i = 0; i < dev->num_tx_queues; i++) {
1312		struct netdev_queue *dev_queue;
1313		spinlock_t *root_lock;
1314		struct Qdisc *q;
1315		int val;
1316
1317		dev_queue = netdev_get_tx_queue(dev, i);
1318		q = rtnl_dereference(dev_queue->qdisc_sleeping);
1319
1320		root_lock = qdisc_lock(q);
1321		spin_lock_bh(root_lock);
1322
1323		val = (qdisc_is_running(q) ||
1324		       test_bit(__QDISC_STATE_SCHED, &q->state));
1325
1326		spin_unlock_bh(root_lock);
1327
1328		if (val)
1329			return true;
1330	}
1331	return false;
1332}
1333
1334/**
1335 * 	dev_deactivate_many - deactivate transmissions on several devices
1336 * 	@head: list of devices to deactivate
1337 *
1338 *	This function returns only when all outstanding transmissions
1339 *	have completed, unless all devices are in dismantle phase.
1340 */
1341void dev_deactivate_many(struct list_head *head)
1342{
1343	struct net_device *dev;
1344
1345	list_for_each_entry(dev, head, close_list) {
1346		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1347					 &noop_qdisc);
1348		if (dev_ingress_queue(dev))
1349			dev_deactivate_queue(dev, dev_ingress_queue(dev),
1350					     &noop_qdisc);
1351
1352		dev_watchdog_down(dev);
1353	}
1354
1355	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
1356	 * outstanding qdisc enqueuing calls.
1357	 * This is avoided if all devices are in dismantle phase :
1358	 * Caller will call synchronize_net() for us
1359	 */
1360	synchronize_net();
1361
1362	list_for_each_entry(dev, head, close_list) {
1363		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1364
1365		if (dev_ingress_queue(dev))
1366			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1367	}
1368
1369	/* Wait for outstanding qdisc_run calls. */
1370	list_for_each_entry(dev, head, close_list) {
1371		while (some_qdisc_is_busy(dev)) {
1372			/* wait_event() would avoid this sleep-loop but would
1373			 * require expensive checks in the fast paths of packet
1374			 * processing which isn't worth it.
1375			 */
1376			schedule_timeout_uninterruptible(1);
1377		}
1378	}
1379}
1380
1381void dev_deactivate(struct net_device *dev)
1382{
1383	LIST_HEAD(single);
1384
1385	list_add(&dev->close_list, &single);
1386	dev_deactivate_many(&single);
1387	list_del(&single);
1388}
1389EXPORT_SYMBOL(dev_deactivate);
1390
1391static int qdisc_change_tx_queue_len(struct net_device *dev,
1392				     struct netdev_queue *dev_queue)
1393{
1394	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1395	const struct Qdisc_ops *ops = qdisc->ops;
1396
1397	if (ops->change_tx_queue_len)
1398		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1399	return 0;
1400}
1401
1402void dev_qdisc_change_real_num_tx(struct net_device *dev,
1403				  unsigned int new_real_tx)
1404{
1405	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1406
1407	if (qdisc->ops->change_real_num_tx)
1408		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1409}
1410
1411void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1412{
1413#ifdef CONFIG_NET_SCHED
1414	struct net_device *dev = qdisc_dev(sch);
1415	struct Qdisc *qdisc;
1416	unsigned int i;
1417
1418	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1419		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1420		/* Only update the default qdiscs we created,
1421		 * qdiscs with handles are always hashed.
1422		 */
1423		if (qdisc != &noop_qdisc && !qdisc->handle)
1424			qdisc_hash_del(qdisc);
1425	}
1426	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1427		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1428		if (qdisc != &noop_qdisc && !qdisc->handle)
1429			qdisc_hash_add(qdisc, false);
1430	}
1431#endif
1432}
1433EXPORT_SYMBOL(mq_change_real_num_tx);
1434
1435int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1436{
1437	bool up = dev->flags & IFF_UP;
1438	unsigned int i;
1439	int ret = 0;
1440
1441	if (up)
1442		dev_deactivate(dev);
1443
1444	for (i = 0; i < dev->num_tx_queues; i++) {
1445		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1446
1447		/* TODO: revert changes on a partial failure */
1448		if (ret)
1449			break;
1450	}
1451
1452	if (up)
1453		dev_activate(dev);
1454	return ret;
1455}
1456
1457static void dev_init_scheduler_queue(struct net_device *dev,
1458				     struct netdev_queue *dev_queue,
1459				     void *_qdisc)
1460{
1461	struct Qdisc *qdisc = _qdisc;
1462
1463	rcu_assign_pointer(dev_queue->qdisc, qdisc);
1464	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1465}
1466
1467void dev_init_scheduler(struct net_device *dev)
1468{
1469	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1470	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1471	if (dev_ingress_queue(dev))
1472		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1473
1474	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1475}
1476
1477void dev_shutdown(struct net_device *dev)
1478{
1479	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1480	if (dev_ingress_queue(dev))
1481		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1482	qdisc_put(rtnl_dereference(dev->qdisc));
1483	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1484
1485	WARN_ON(timer_pending(&dev->watchdog_timer));
1486}
1487
1488/**
1489 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1490 * @rate:   Rate to compute reciprocal division values of
1491 * @mult:   Multiplier for reciprocal division
1492 * @shift:  Shift for reciprocal division
1493 *
1494 * The multiplier and shift for reciprocal division by rate are stored
1495 * in mult and shift.
1496 *
1497 * The deal here is to replace a divide by a reciprocal one
1498 * in fast path (a reciprocal divide is a multiply and a shift)
1499 *
1500 * Normal formula would be :
1501 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1502 *
1503 * We compute mult/shift to use instead :
1504 *  time_in_ns = (len * mult) >> shift;
1505 *
1506 * We try to get the highest possible mult value for accuracy,
1507 * but have to make sure no overflows will ever happen.
1508 *
 1509 * reciprocal_value() is not used here because it doesn't handle 64-bit values.
1510 */
1511static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1512{
1513	u64 factor = NSEC_PER_SEC;
1514
1515	*mult = 1;
1516	*shift = 0;
1517
1518	if (rate <= 0)
1519		return;
1520
1521	for (;;) {
1522		*mult = div64_u64(factor, rate);
1523		if (*mult & (1U << 31) || factor & (1ULL << 63))
1524			break;
1525		factor <<= 1;
1526		(*shift)++;
1527	}
1528}
1529
1530void psched_ratecfg_precompute(struct psched_ratecfg *r,
1531			       const struct tc_ratespec *conf,
1532			       u64 rate64)
1533{
1534	memset(r, 0, sizeof(*r));
1535	r->overhead = conf->overhead;
1536	r->mpu = conf->mpu;
1537	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1538	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1539	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1540}
1541EXPORT_SYMBOL(psched_ratecfg_precompute);
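A worked example of the mult/shift trick described above, as a hypothetical helper that is not part of this file: precomputing a 1 Gbit/s rate (125,000,000 bytes/s) yields mult = 1U << 31 and shift = 28, so the per-packet cost reduces to one multiply and one shift.

static void example_ratecfg(void)
{
	struct tc_ratespec spec = {};
	struct psched_ratecfg r;

	/* 1 Gbit/s expressed in bytes per second */
	psched_ratecfg_precompute(&r, &spec, 125000000ULL);

	/* r.mult == 2147483648 (1U << 31) and r.shift == 28, so a 1500 byte
	 * packet costs (1500ULL * r.mult) >> r.shift == 12000 ns, exactly
	 * 1500 * NSEC_PER_SEC / 125000000.
	 */
}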
1542
1543void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1544{
1545	r->rate_pkts_ps = pktrate64;
1546	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1547}
1548EXPORT_SYMBOL(psched_ppscfg_precompute);
1549
1550void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1551			  struct tcf_proto *tp_head)
1552{
1553	/* Protected with chain0->filter_chain_lock.
1554	 * Can't access chain directly because tp_head can be NULL.
1555	 */
1556	struct mini_Qdisc *miniq_old =
1557		rcu_dereference_protected(*miniqp->p_miniq, 1);
1558	struct mini_Qdisc *miniq;
1559
1560	if (!tp_head) {
1561		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1562	} else {
1563		miniq = miniq_old != &miniqp->miniq1 ?
1564			&miniqp->miniq1 : &miniqp->miniq2;
1565
1566		/* We need to make sure that readers won't see the miniq
1567		 * we are about to modify. So ensure that at least one RCU
1568		 * grace period has elapsed since the miniq was made
1569		 * inactive.
1570		 */
1571		if (IS_ENABLED(CONFIG_PREEMPT_RT))
1572			cond_synchronize_rcu(miniq->rcu_state);
1573		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1574			synchronize_rcu_expedited();
1575
1576		miniq->filter_list = tp_head;
1577		rcu_assign_pointer(*miniqp->p_miniq, miniq);
1578	}
1579
1580	if (miniq_old)
1581		/* This is counterpart of the rcu sync above. We need to
1582		 * block potential new user of miniq_old until all readers
1583		 * are not seeing it.
1584		 */
1585		miniq_old->rcu_state = start_poll_synchronize_rcu();
1586}
1587EXPORT_SYMBOL(mini_qdisc_pair_swap);
1588
1589void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1590				struct tcf_block *block)
1591{
1592	miniqp->miniq1.block = block;
1593	miniqp->miniq2.block = block;
1594}
1595EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1596
1597void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1598			  struct mini_Qdisc __rcu **p_miniq)
1599{
1600	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1601	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1602	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1603	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1604	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1605	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1606	miniqp->p_miniq = p_miniq;
1607}
1608EXPORT_SYMBOL(mini_qdisc_pair_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_generic.c	Generic packet scheduler routines.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
   7 *              - Ingress support
   8 */
   9
  10#include <linux/bitops.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/string.h>
  16#include <linux/errno.h>
  17#include <linux/netdevice.h>
  18#include <linux/skbuff.h>
  19#include <linux/rtnetlink.h>
  20#include <linux/init.h>
  21#include <linux/rcupdate.h>
  22#include <linux/list.h>
  23#include <linux/slab.h>
  24#include <linux/if_vlan.h>
  25#include <linux/skb_array.h>
  26#include <linux/if_macvlan.h>
  27#include <net/sch_generic.h>
  28#include <net/pkt_sched.h>
  29#include <net/dst.h>
  30#include <net/hotdata.h>
  31#include <trace/events/qdisc.h>
  32#include <trace/events/net.h>
  33#include <net/xfrm.h>
  34
  35/* Qdisc to use by default */
  36const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
  37EXPORT_SYMBOL(default_qdisc_ops);
  38
  39static void qdisc_maybe_clear_missed(struct Qdisc *q,
  40				     const struct netdev_queue *txq)
  41{
  42	clear_bit(__QDISC_STATE_MISSED, &q->state);
  43
  44	/* Make sure the below netif_xmit_frozen_or_stopped()
  45	 * checking happens after clearing STATE_MISSED.
  46	 */
  47	smp_mb__after_atomic();
  48
  49	/* Checking netif_xmit_frozen_or_stopped() again to
  50	 * make sure STATE_MISSED is set if the STATE_MISSED
  51	 * set by netif_tx_wake_queue()'s rescheduling of
  52	 * net_tx_action() is cleared by the above clear_bit().
  53	 */
  54	if (!netif_xmit_frozen_or_stopped(txq))
  55		set_bit(__QDISC_STATE_MISSED, &q->state);
  56	else
  57		set_bit(__QDISC_STATE_DRAINING, &q->state);
  58}
  59
  60/* Main transmission queue. */
  61
  62/* Modifications to data participating in scheduling must be protected with
  63 * qdisc_lock(qdisc) spinlock.
  64 *
  65 * The idea is the following:
  66 * - enqueue, dequeue are serialized via qdisc root lock
  67 * - ingress filtering is also serialized via qdisc root lock
  68 * - updates to tree and tree walking are only done under the rtnl mutex.
  69 */
  70
  71#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
  72
  73static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
  74{
  75	const struct netdev_queue *txq = q->dev_queue;
  76	spinlock_t *lock = NULL;
  77	struct sk_buff *skb;
  78
  79	if (q->flags & TCQ_F_NOLOCK) {
  80		lock = qdisc_lock(q);
  81		spin_lock(lock);
  82	}
  83
  84	skb = skb_peek(&q->skb_bad_txq);
  85	if (skb) {
  86		/* check the reason of requeuing without tx lock first */
  87		txq = skb_get_tx_queue(txq->dev, skb);
  88		if (!netif_xmit_frozen_or_stopped(txq)) {
  89			skb = __skb_dequeue(&q->skb_bad_txq);
  90			if (qdisc_is_percpu_stats(q)) {
  91				qdisc_qstats_cpu_backlog_dec(q, skb);
  92				qdisc_qstats_cpu_qlen_dec(q);
  93			} else {
  94				qdisc_qstats_backlog_dec(q, skb);
  95				q->q.qlen--;
  96			}
  97		} else {
  98			skb = SKB_XOFF_MAGIC;
  99			qdisc_maybe_clear_missed(q, txq);
 100		}
 101	}
 102
 103	if (lock)
 104		spin_unlock(lock);
 105
 106	return skb;
 107}
 108
 109static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
 110{
 111	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
 112
 113	if (unlikely(skb))
 114		skb = __skb_dequeue_bad_txq(q);
 115
 116	return skb;
 117}
 118
 119static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 120					     struct sk_buff *skb)
 121{
 122	spinlock_t *lock = NULL;
 123
 124	if (q->flags & TCQ_F_NOLOCK) {
 125		lock = qdisc_lock(q);
 126		spin_lock(lock);
 127	}
 128
 129	__skb_queue_tail(&q->skb_bad_txq, skb);
 130
 131	if (qdisc_is_percpu_stats(q)) {
 132		qdisc_qstats_cpu_backlog_inc(q, skb);
 133		qdisc_qstats_cpu_qlen_inc(q);
 134	} else {
 135		qdisc_qstats_backlog_inc(q, skb);
 136		q->q.qlen++;
 137	}
 138
 139	if (lock)
 140		spin_unlock(lock);
 141}
 142
 143static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 144{
 145	spinlock_t *lock = NULL;
 146
 147	if (q->flags & TCQ_F_NOLOCK) {
 148		lock = qdisc_lock(q);
 149		spin_lock(lock);
 150	}
 151
 152	while (skb) {
 153		struct sk_buff *next = skb->next;
 154
 155		__skb_queue_tail(&q->gso_skb, skb);
 156
 157		/* it's still part of the queue */
 158		if (qdisc_is_percpu_stats(q)) {
 159			qdisc_qstats_cpu_requeues_inc(q);
 160			qdisc_qstats_cpu_backlog_inc(q, skb);
 161			qdisc_qstats_cpu_qlen_inc(q);
 162		} else {
 163			q->qstats.requeues++;
 164			qdisc_qstats_backlog_inc(q, skb);
 165			q->q.qlen++;
 166		}
 167
 168		skb = next;
 169	}
 170
 171	if (lock) {
 172		spin_unlock(lock);
 173		set_bit(__QDISC_STATE_MISSED, &q->state);
 174	} else {
 175		__netif_schedule(q);
 176	}
 177}
 178
 179static void try_bulk_dequeue_skb(struct Qdisc *q,
 180				 struct sk_buff *skb,
 181				 const struct netdev_queue *txq,
 182				 int *packets)
 183{
 184	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 185
 186	while (bytelimit > 0) {
 187		struct sk_buff *nskb = q->dequeue(q);
 188
 189		if (!nskb)
 190			break;
 191
 192		bytelimit -= nskb->len; /* covers GSO len */
 193		skb->next = nskb;
 194		skb = nskb;
 195		(*packets)++; /* GSO counts as one pkt */
 196	}
 197	skb_mark_not_on_list(skb);
 198}
 199
 200/* This variant of try_bulk_dequeue_skb() makes sure
 201 * all skbs in the chain are for the same txq
 202 */
 203static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 204				      struct sk_buff *skb,
 205				      int *packets)
 206{
 207	int mapping = skb_get_queue_mapping(skb);
 208	struct sk_buff *nskb;
 209	int cnt = 0;
 210
 211	do {
 212		nskb = q->dequeue(q);
 213		if (!nskb)
 214			break;
 215		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 216			qdisc_enqueue_skb_bad_txq(q, nskb);
 217			break;
 218		}
 219		skb->next = nskb;
 220		skb = nskb;
 221	} while (++cnt < 8);
 222	(*packets) += cnt;
 223	skb_mark_not_on_list(skb);
 224}
 225
 226/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 227 * A requeued skb (via q->gso_skb) can also be a SKB list.
 228 */
 229static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 230				   int *packets)
 231{
 232	const struct netdev_queue *txq = q->dev_queue;
 233	struct sk_buff *skb = NULL;
 234
 235	*packets = 1;
 236	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
 237		spinlock_t *lock = NULL;
 238
 239		if (q->flags & TCQ_F_NOLOCK) {
 240			lock = qdisc_lock(q);
 241			spin_lock(lock);
 242		}
 243
 244		skb = skb_peek(&q->gso_skb);
 245
 246		/* skb may be null if another cpu pulls gso_skb off in between
 247		 * empty check and lock.
 248		 */
 249		if (!skb) {
 250			if (lock)
 251				spin_unlock(lock);
 252			goto validate;
 253		}
 254
 255		/* skb in gso_skb were already validated */
 256		*validate = false;
 257		if (xfrm_offload(skb))
 258			*validate = true;
 259		/* check the reason of requeuing without tx lock first */
 260		txq = skb_get_tx_queue(txq->dev, skb);
 261		if (!netif_xmit_frozen_or_stopped(txq)) {
 262			skb = __skb_dequeue(&q->gso_skb);
 263			if (qdisc_is_percpu_stats(q)) {
 264				qdisc_qstats_cpu_backlog_dec(q, skb);
 265				qdisc_qstats_cpu_qlen_dec(q);
 266			} else {
 267				qdisc_qstats_backlog_dec(q, skb);
 268				q->q.qlen--;
 269			}
 270		} else {
 271			skb = NULL;
 272			qdisc_maybe_clear_missed(q, txq);
 273		}
 274		if (lock)
 275			spin_unlock(lock);
 276		goto trace;
 277	}
 278validate:
 279	*validate = true;
 280
 281	if ((q->flags & TCQ_F_ONETXQUEUE) &&
 282	    netif_xmit_frozen_or_stopped(txq)) {
 283		qdisc_maybe_clear_missed(q, txq);
 284		return skb;
 285	}
 286
 287	skb = qdisc_dequeue_skb_bad_txq(q);
 288	if (unlikely(skb)) {
 289		if (skb == SKB_XOFF_MAGIC)
 290			return NULL;
 291		goto bulk;
 292	}
 293	skb = q->dequeue(q);
 294	if (skb) {
 295bulk:
 296		if (qdisc_may_bulk(q))
 297			try_bulk_dequeue_skb(q, skb, txq, packets);
 298		else
 299			try_bulk_dequeue_skb_slow(q, skb, packets);
 300	}
 301trace:
 302	trace_qdisc_dequeue(q, txq, *packets, skb);
 303	return skb;
 304}
 305
 306/*
 307 * Transmit possibly several skbs, and handle the return status as
 308 * required. Owning qdisc running bit guarantees that only one CPU
 309 * can execute this function.
 310 *
 311 * Returns to the caller:
 312 *				false  - hardware queue frozen backoff
 313 *				true   - feel free to send more pkts
 314 */
 315bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 316		     struct net_device *dev, struct netdev_queue *txq,
 317		     spinlock_t *root_lock, bool validate)
 318{
 319	int ret = NETDEV_TX_BUSY;
 320	bool again = false;
 321
 322	/* And release qdisc */
 323	if (root_lock)
 324		spin_unlock(root_lock);
 325
 326	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 327	if (validate)
 328		skb = validate_xmit_skb_list(skb, dev, &again);
 329
 330#ifdef CONFIG_XFRM_OFFLOAD
 331	if (unlikely(again)) {
 332		if (root_lock)
 333			spin_lock(root_lock);
 334
 335		dev_requeue_skb(skb, q);
 336		return false;
 337	}
 338#endif
 339
 340	if (likely(skb)) {
 341		HARD_TX_LOCK(dev, txq, smp_processor_id());
 342		if (!netif_xmit_frozen_or_stopped(txq))
 343			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 344		else
 345			qdisc_maybe_clear_missed(q, txq);
 346
 347		HARD_TX_UNLOCK(dev, txq);
 348	} else {
 349		if (root_lock)
 350			spin_lock(root_lock);
 351		return true;
 352	}
 353
 354	if (root_lock)
 355		spin_lock(root_lock);
 356
 357	if (!dev_xmit_complete(ret)) {
 358		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 359		if (unlikely(ret != NETDEV_TX_BUSY))
 360			net_warn_ratelimited("BUG %s code %d qlen %d\n",
 361					     dev->name, ret, q->q.qlen);
 362
 363		dev_requeue_skb(skb, q);
 364		return false;
 365	}
 366
 367	return true;
 368}
 369
 370/*
 371 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 372 *
 373 * running seqcount guarantees only one CPU can process
 374 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 375 * this queue.
 376 *
 377 *  netif_tx_lock serializes accesses to device driver.
 378 *
 379 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 380 *  if one is grabbed, another must be free.
 381 *
 382 * Note, that this procedure can be called by a watchdog timer
 383 *
 384 * Returns to the caller:
 385 *				0  - queue is empty or throttled.
 386 *				>0 - queue is not empty.
 387 *
 388 */
 389static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 390{
 391	spinlock_t *root_lock = NULL;
 392	struct netdev_queue *txq;
 393	struct net_device *dev;
 394	struct sk_buff *skb;
 395	bool validate;
 396
 397	/* Dequeue packet */
 398	skb = dequeue_skb(q, &validate, packets);
 399	if (unlikely(!skb))
 400		return false;
 401
 402	if (!(q->flags & TCQ_F_NOLOCK))
 403		root_lock = qdisc_lock(q);
 404
 405	dev = qdisc_dev(q);
 406	txq = skb_get_tx_queue(dev, skb);
 407
 408	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
 409}
 410
 411void __qdisc_run(struct Qdisc *q)
 412{
 413	int quota = READ_ONCE(net_hotdata.dev_tx_weight);
 414	int packets;
 415
 416	while (qdisc_restart(q, &packets)) {
 417		quota -= packets;
 418		if (quota <= 0) {
 419			if (q->flags & TCQ_F_NOLOCK)
 420				set_bit(__QDISC_STATE_MISSED, &q->state);
 421			else
 422				__netif_schedule(q);
 423
 424			break;
 425		}
 426	}
 427}
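
/* Editor's note, not part of the original source: the quota above comes from
 * net_hotdata.dev_tx_weight (typically 64, tunable via the net.core.dev_weight
 * and net.core.dev_weight_tx_bias sysctls). A rough sketch of the effect:
 *
 *	__qdisc_run(q);		// dequeues/transmits up to ~dev_tx_weight packets
 *				// if packets remain, the qdisc is left for later:
 *				// NOLOCK qdiscs set __QDISC_STATE_MISSED, others
 *				// are rescheduled via __netif_schedule()
 */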
 428
 429unsigned long dev_trans_start(struct net_device *dev)
 430{
 431	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
 432	unsigned long val;
 433	unsigned int i;
 434
 435	for (i = 1; i < dev->num_tx_queues; i++) {
 436		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
 437		if (val && time_after(val, res))
 438			res = val;
 439	}
 440
 441	return res;
 442}
 443EXPORT_SYMBOL(dev_trans_start);
 444
 445static void netif_freeze_queues(struct net_device *dev)
 446{
 447	unsigned int i;
 448	int cpu;
 449
 450	cpu = smp_processor_id();
 451	for (i = 0; i < dev->num_tx_queues; i++) {
 452		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 453
 454		/* We are the only thread of execution doing a
 455		 * freeze, but we have to grab the _xmit_lock in
 456		 * order to synchronize with threads which are in
 457		 * the ->hard_start_xmit() handler and already
 458		 * checked the frozen bit.
 459		 */
 460		__netif_tx_lock(txq, cpu);
 461		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 462		__netif_tx_unlock(txq);
 463	}
 464}
 465
 466void netif_tx_lock(struct net_device *dev)
 467{
 468	spin_lock(&dev->tx_global_lock);
 469	netif_freeze_queues(dev);
 470}
 471EXPORT_SYMBOL(netif_tx_lock);
 472
 473static void netif_unfreeze_queues(struct net_device *dev)
 474{
 475	unsigned int i;
 476
 477	for (i = 0; i < dev->num_tx_queues; i++) {
 478		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 479
 480		/* No need to grab the _xmit_lock here.  If the
 481		 * queue is not stopped for another reason, we
 482		 * force a schedule.
 483		 */
 484		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 485		netif_schedule_queue(txq);
 486	}
 487}
 488
 489void netif_tx_unlock(struct net_device *dev)
 490{
 491	netif_unfreeze_queues(dev);
 492	spin_unlock(&dev->tx_global_lock);
 493}
 494EXPORT_SYMBOL(netif_tx_unlock);
 495
 496static void dev_watchdog(struct timer_list *t)
 497{
 498	struct net_device *dev = from_timer(dev, t, watchdog_timer);
 499	bool release = true;
 500
 501	spin_lock(&dev->tx_global_lock);
 502	if (!qdisc_tx_is_noop(dev)) {
 503		if (netif_device_present(dev) &&
 504		    netif_running(dev) &&
 505		    netif_carrier_ok(dev)) {
 506			unsigned int timedout_ms = 0;
 507			unsigned int i;
 508			unsigned long trans_start;
 509			unsigned long oldest_start = jiffies;
 510
 511			for (i = 0; i < dev->num_tx_queues; i++) {
 512				struct netdev_queue *txq;
 513
 514				txq = netdev_get_tx_queue(dev, i);
 515				if (!netif_xmit_stopped(txq))
 516					continue;
 517
 518				/* Paired with WRITE_ONCE() + smp_mb...() in
 519				 * netdev_tx_sent_queue() and netif_tx_stop_queue().
 520				 */
 521				smp_mb();
 522				trans_start = READ_ONCE(txq->trans_start);
 523
  524				if (time_after(jiffies, trans_start + dev->watchdog_timeo)) {
  525					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
 526					atomic_long_inc(&txq->trans_timeout);
 527					break;
 528				}
 529				if (time_after(oldest_start, trans_start))
 530					oldest_start = trans_start;
 531			}
 532
 533			if (unlikely(timedout_ms)) {
 534				trace_net_dev_xmit_timeout(dev, i);
 535				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
 536					    raw_smp_processor_id(),
 537					    i, timedout_ms);
 538				netif_freeze_queues(dev);
 539				dev->netdev_ops->ndo_tx_timeout(dev, i);
 540				netif_unfreeze_queues(dev);
 541			}
 542			if (!mod_timer(&dev->watchdog_timer,
 543				       round_jiffies(oldest_start +
 544						     dev->watchdog_timeo)))
 545				release = false;
 546		}
 547	}
 548	spin_unlock(&dev->tx_global_lock);
 549
 550	if (release)
 551		netdev_put(dev, &dev->watchdog_dev_tracker);
 552}
 553
 554void __netdev_watchdog_up(struct net_device *dev)
 555{
 556	if (dev->netdev_ops->ndo_tx_timeout) {
 557		if (dev->watchdog_timeo <= 0)
 558			dev->watchdog_timeo = 5*HZ;
 559		if (!mod_timer(&dev->watchdog_timer,
 560			       round_jiffies(jiffies + dev->watchdog_timeo)))
 561			netdev_hold(dev, &dev->watchdog_dev_tracker,
 562				    GFP_ATOMIC);
 563	}
 564}
 565EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
 566
 567static void dev_watchdog_up(struct net_device *dev)
 568{
 569	__netdev_watchdog_up(dev);
 570}
 571
 572static void dev_watchdog_down(struct net_device *dev)
 573{
 574	netif_tx_lock_bh(dev);
 575	if (del_timer(&dev->watchdog_timer))
 576		netdev_put(dev, &dev->watchdog_dev_tracker);
 577	netif_tx_unlock_bh(dev);
 578}
 579
 580/**
 581 *	netif_carrier_on - set carrier
 582 *	@dev: network device
 583 *
 584 * Device has detected acquisition of carrier.
 585 */
 586void netif_carrier_on(struct net_device *dev)
 587{
 588	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 589		if (dev->reg_state == NETREG_UNINITIALIZED)
 590			return;
 591		atomic_inc(&dev->carrier_up_count);
 592		linkwatch_fire_event(dev);
 593		if (netif_running(dev))
 594			__netdev_watchdog_up(dev);
 595	}
 596}
 597EXPORT_SYMBOL(netif_carrier_on);
 598
 599/**
 600 *	netif_carrier_off - clear carrier
 601 *	@dev: network device
 602 *
 603 * Device has detected loss of carrier.
 604 */
 605void netif_carrier_off(struct net_device *dev)
 606{
 607	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 608		if (dev->reg_state == NETREG_UNINITIALIZED)
 609			return;
 610		atomic_inc(&dev->carrier_down_count);
 611		linkwatch_fire_event(dev);
 612	}
 613}
 614EXPORT_SYMBOL(netif_carrier_off);
 615
 616/**
 617 *	netif_carrier_event - report carrier state event
 618 *	@dev: network device
 619 *
 620 * Device has detected a carrier event but the carrier state wasn't changed.
 621 * Use in drivers when querying carrier state asynchronously, to avoid missing
 622 * events (link flaps) if link recovers before it's queried.
 623 */
 624void netif_carrier_event(struct net_device *dev)
 625{
 626	if (dev->reg_state == NETREG_UNINITIALIZED)
 627		return;
 628	atomic_inc(&dev->carrier_up_count);
 629	atomic_inc(&dev->carrier_down_count);
 630	linkwatch_fire_event(dev);
 631}
 632EXPORT_SYMBOL_GPL(netif_carrier_event);
 633
 634/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 635   under all circumstances. It is difficult to invent anything faster or
 636   cheaper.
 637 */
 638
 639static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 640			struct sk_buff **to_free)
 641{
 642	dev_core_stats_tx_dropped_inc(skb->dev);
 643	__qdisc_drop(skb, to_free);
 644	return NET_XMIT_CN;
 645}
 646
 647static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
 648{
 649	return NULL;
 650}
 651
 652struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 653	.id		=	"noop",
 654	.priv_size	=	0,
 655	.enqueue	=	noop_enqueue,
 656	.dequeue	=	noop_dequeue,
 657	.peek		=	noop_dequeue,
 658	.owner		=	THIS_MODULE,
 659};
 660
 661static struct netdev_queue noop_netdev_queue = {
 662	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 663	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
 664};
 665
 666struct Qdisc noop_qdisc = {
 667	.enqueue	=	noop_enqueue,
 668	.dequeue	=	noop_dequeue,
 669	.flags		=	TCQ_F_BUILTIN,
 670	.ops		=	&noop_qdisc_ops,
 671	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 672	.dev_queue	=	&noop_netdev_queue,
 673	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 674	.gso_skb = {
 675		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
 676		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
 677		.qlen = 0,
 678		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
 679	},
 680	.skb_bad_txq = {
 681		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 682		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 683		.qlen = 0,
 684		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 685	},
 686	.owner = -1,
 687};
 688EXPORT_SYMBOL(noop_qdisc);
 689
 690static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
 691			struct netlink_ext_ack *extack)
 692{
 693	/* register_qdisc() assigns a default of noop_enqueue if unset,
 694	 * but __dev_queue_xmit() treats noqueue only as such
 695	 * if this is NULL - so clear it here. */
 696	qdisc->enqueue = NULL;
 697	return 0;
 698}
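
/* Editor's sketch, not part of the original source: __dev_queue_xmit() in
 * net/core/dev.c treats a NULL ->enqueue as the noqueue case, roughly:
 *
 *	q = rcu_dereference_bh(txq->qdisc);
 *	if (q->enqueue) {
 *		rc = __dev_xmit_skb(skb, q, dev, txq);	// normal qdisc path
 *		goto out;
 *	}
 *	// otherwise: no queue, transmit directly under HARD_TX_LOCK
 *	// provided the device is up and the tx queue is not stopped
 */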
 699
 700struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 701	.id		=	"noqueue",
 702	.priv_size	=	0,
 703	.init		=	noqueue_init,
 704	.enqueue	=	noop_enqueue,
 705	.dequeue	=	noop_dequeue,
 706	.peek		=	noop_dequeue,
 707	.owner		=	THIS_MODULE,
 708};
 709
 710const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
 711	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
 712};
 713EXPORT_SYMBOL(sch_default_prio2band);
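
/* Worked example, not part of the original source: pfifo_fast_enqueue()
 * below indexes this map with (skb->priority & TC_PRIO_MAX), so:
 *
 *	TC_PRIO_CONTROL     (7) -> band 0 (served first)
 *	TC_PRIO_INTERACTIVE (6) -> band 0
 *	TC_PRIO_BESTEFFORT  (0) -> band 1
 *	TC_PRIO_BULK        (2) -> band 2 (served last)
 */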
 714
 715/* 3-band FIFO queue: old style, but should be a bit faster than
 716   generic prio+fifo combination.
 717 */
 718
 719#define PFIFO_FAST_BANDS 3
 720
 721/*
 722 * Private data for a pfifo_fast scheduler containing:
 723 *	- rings for priority bands
 724 */
 725struct pfifo_fast_priv {
 726	struct skb_array q[PFIFO_FAST_BANDS];
 727};
 728
 729static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
 730					  int band)
 731{
 732	return &priv->q[band];
 733}
 734
 735static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 736			      struct sk_buff **to_free)
 737{
 738	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
 739	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 740	struct skb_array *q = band2list(priv, band);
 741	unsigned int pkt_len = qdisc_pkt_len(skb);
 742	int err;
 743
 744	err = skb_array_produce(q, skb);
 745
 746	if (unlikely(err)) {
 747		if (qdisc_is_percpu_stats(qdisc))
 748			return qdisc_drop_cpu(skb, qdisc, to_free);
 749		else
 750			return qdisc_drop(skb, qdisc, to_free);
 751	}
 752
 753	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 754	return NET_XMIT_SUCCESS;
 755}
 756
 757static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 758{
 759	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 760	struct sk_buff *skb = NULL;
 761	bool need_retry = true;
 762	int band;
 763
 764retry:
 765	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 766		struct skb_array *q = band2list(priv, band);
 767
 768		if (__skb_array_empty(q))
 769			continue;
 770
 771		skb = __skb_array_consume(q);
 772	}
 773	if (likely(skb)) {
 774		qdisc_update_stats_at_dequeue(qdisc, skb);
 775	} else if (need_retry &&
 776		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
 777		/* Delay clearing the STATE_MISSED here to reduce
 778		 * the overhead of the second spin_trylock() in
  779		 * qdisc_run_begin() and of the __netif_schedule() call
  780		 * in qdisc_run_end().
 781		 */
 782		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
 783		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
 784
 785		/* Make sure dequeuing happens after clearing
 786		 * STATE_MISSED.
 787		 */
 788		smp_mb__after_atomic();
 789
 790		need_retry = false;
 791
 792		goto retry;
 793	}
 794
 795	return skb;
 796}
 797
 798static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 799{
 800	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 801	struct sk_buff *skb = NULL;
 802	int band;
 803
 804	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 805		struct skb_array *q = band2list(priv, band);
 806
 807		skb = __skb_array_peek(q);
 808	}
 809
 810	return skb;
 811}
 812
 813static void pfifo_fast_reset(struct Qdisc *qdisc)
 814{
 815	int i, band;
 816	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 817
 818	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
 819		struct skb_array *q = band2list(priv, band);
 820		struct sk_buff *skb;
 821
  822		/* A NULL ring is possible if the destroy path is taken because
  823		 * skb_array_init() failed in pfifo_fast_init().
 824		 */
 825		if (!q->ring.queue)
 826			continue;
 827
 828		while ((skb = __skb_array_consume(q)) != NULL)
 829			kfree_skb(skb);
 830	}
 831
 832	if (qdisc_is_percpu_stats(qdisc)) {
 833		for_each_possible_cpu(i) {
 834			struct gnet_stats_queue *q;
 835
 836			q = per_cpu_ptr(qdisc->cpu_qstats, i);
 837			q->backlog = 0;
 838			q->qlen = 0;
 839		}
 840	}
 841}
 842
 843static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 844{
 845	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 846
 847	memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
 848	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 849		goto nla_put_failure;
 850	return skb->len;
 851
 852nla_put_failure:
 853	return -1;
 854}
 855
 856static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
 857			   struct netlink_ext_ack *extack)
 858{
 859	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 860	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 861	int prio;
 862
 863	/* guard against zero length rings */
 864	if (!qlen)
 865		return -EINVAL;
 866
 867	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 868		struct skb_array *q = band2list(priv, prio);
 869		int err;
 870
 871		err = skb_array_init(q, qlen, GFP_KERNEL);
 872		if (err)
 873			return -ENOMEM;
 874	}
 875
  876	/* Can bypass the queue discipline */
 877	qdisc->flags |= TCQ_F_CAN_BYPASS;
 878	return 0;
 879}
 880
 881static void pfifo_fast_destroy(struct Qdisc *sch)
 882{
 883	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 884	int prio;
 885
 886	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 887		struct skb_array *q = band2list(priv, prio);
 888
  889		/* A NULL ring is possible if the destroy path is taken because
  890		 * skb_array_init() failed in pfifo_fast_init().
 891		 */
 892		if (!q->ring.queue)
 893			continue;
 894		/* Destroy ring but no need to kfree_skb because a call to
 895		 * pfifo_fast_reset() has already done that work.
 896		 */
 897		ptr_ring_cleanup(&q->ring, NULL);
 898	}
 899}
 900
 901static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
 902					  unsigned int new_len)
 903{
 904	struct pfifo_fast_priv *priv = qdisc_priv(sch);
 905	struct skb_array *bands[PFIFO_FAST_BANDS];
 906	int prio;
 907
 908	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 909		struct skb_array *q = band2list(priv, prio);
 910
 911		bands[prio] = q;
 912	}
 913
 914	return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
 915					    GFP_KERNEL);
 916}
 917
 918struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 919	.id		=	"pfifo_fast",
 920	.priv_size	=	sizeof(struct pfifo_fast_priv),
 921	.enqueue	=	pfifo_fast_enqueue,
 922	.dequeue	=	pfifo_fast_dequeue,
 923	.peek		=	pfifo_fast_peek,
 924	.init		=	pfifo_fast_init,
 925	.destroy	=	pfifo_fast_destroy,
 926	.reset		=	pfifo_fast_reset,
 927	.dump		=	pfifo_fast_dump,
 928	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
 929	.owner		=	THIS_MODULE,
 930	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 931};
 932EXPORT_SYMBOL(pfifo_fast_ops);
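
/* Usage note, not part of the original source: pfifo_fast is the kernel's
 * built-in default qdisc and can be selected or reinstated from userspace,
 * e.g.:
 *
 *	sysctl -w net.core.default_qdisc=pfifo_fast
 *	tc qdisc replace dev eth0 root pfifo_fast
 */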
 933
 934static struct lock_class_key qdisc_tx_busylock;
 935
 936struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 937			  const struct Qdisc_ops *ops,
 938			  struct netlink_ext_ack *extack)
 939{
 940	struct Qdisc *sch;
 941	unsigned int size = sizeof(*sch) + ops->priv_size;
 942	int err = -ENOBUFS;
 943	struct net_device *dev;
 944
 945	if (!dev_queue) {
 946		NL_SET_ERR_MSG(extack, "No device queue given");
 947		err = -EINVAL;
 948		goto errout;
 949	}
 950
 951	dev = dev_queue->dev;
 952	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
 953
 954	if (!sch)
 955		goto errout;
 956	__skb_queue_head_init(&sch->gso_skb);
 957	__skb_queue_head_init(&sch->skb_bad_txq);
 958	gnet_stats_basic_sync_init(&sch->bstats);
 959	lockdep_register_key(&sch->root_lock_key);
 960	spin_lock_init(&sch->q.lock);
 961	lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
 962
 963	if (ops->static_flags & TCQ_F_CPUSTATS) {
 964		sch->cpu_bstats =
 965			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 966		if (!sch->cpu_bstats)
 967			goto errout1;
 968
 969		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 970		if (!sch->cpu_qstats) {
 971			free_percpu(sch->cpu_bstats);
 972			goto errout1;
 973		}
 974	}
 975
 976	spin_lock_init(&sch->busylock);
 977	lockdep_set_class(&sch->busylock,
 978			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 979
  980	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
 981	spin_lock_init(&sch->seqlock);
 982	lockdep_set_class(&sch->seqlock,
 983			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 984
 985	sch->ops = ops;
 986	sch->flags = ops->static_flags;
 987	sch->enqueue = ops->enqueue;
 988	sch->dequeue = ops->dequeue;
 989	sch->dev_queue = dev_queue;
 990	sch->owner = -1;
 991	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
 992	refcount_set(&sch->refcnt, 1);
 993
 994	return sch;
 995errout1:
 996	lockdep_unregister_key(&sch->root_lock_key);
 997	kfree(sch);
 998errout:
 999	return ERR_PTR(err);
1000}
1001
1002struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
1003				const struct Qdisc_ops *ops,
1004				unsigned int parentid,
1005				struct netlink_ext_ack *extack)
1006{
1007	struct Qdisc *sch;
1008
1009	if (!try_module_get(ops->owner)) {
1010		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
1011		return NULL;
1012	}
1013
1014	sch = qdisc_alloc(dev_queue, ops, extack);
1015	if (IS_ERR(sch)) {
1016		module_put(ops->owner);
1017		return NULL;
1018	}
1019	sch->parent = parentid;
1020
1021	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1022		trace_qdisc_create(ops, dev_queue->dev, parentid);
1023		return sch;
1024	}
1025
1026	qdisc_put(sch);
1027	return NULL;
1028}
1029EXPORT_SYMBOL(qdisc_create_dflt);
1030
1031/* Under qdisc_lock(qdisc) and BH! */
1032
1033void qdisc_reset(struct Qdisc *qdisc)
1034{
1035	const struct Qdisc_ops *ops = qdisc->ops;
1036
1037	trace_qdisc_reset(qdisc);
1038
1039	if (ops->reset)
1040		ops->reset(qdisc);
1041
1042	__skb_queue_purge(&qdisc->gso_skb);
1043	__skb_queue_purge(&qdisc->skb_bad_txq);
1044
1045	qdisc->q.qlen = 0;
1046	qdisc->qstats.backlog = 0;
1047}
1048EXPORT_SYMBOL(qdisc_reset);
1049
1050void qdisc_free(struct Qdisc *qdisc)
1051{
1052	if (qdisc_is_percpu_stats(qdisc)) {
1053		free_percpu(qdisc->cpu_bstats);
1054		free_percpu(qdisc->cpu_qstats);
1055	}
1056
1057	kfree(qdisc);
1058}
1059
1060static void qdisc_free_cb(struct rcu_head *head)
1061{
1062	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1063
1064	qdisc_free(q);
1065}
1066
1067static void __qdisc_destroy(struct Qdisc *qdisc)
1068{
1069	const struct Qdisc_ops  *ops = qdisc->ops;
1070	struct net_device *dev = qdisc_dev(qdisc);
1071
1072#ifdef CONFIG_NET_SCHED
1073	qdisc_hash_del(qdisc);
1074
1075	qdisc_put_stab(rtnl_dereference(qdisc->stab));
1076#endif
1077	gen_kill_estimator(&qdisc->rate_est);
1078
1079	qdisc_reset(qdisc);
1080
1081
1082	if (ops->destroy)
1083		ops->destroy(qdisc);
1084
1085	lockdep_unregister_key(&qdisc->root_lock_key);
1086	module_put(ops->owner);
1087	netdev_put(dev, &qdisc->dev_tracker);
1088
1089	trace_qdisc_destroy(qdisc);
1090
1091	call_rcu(&qdisc->rcu, qdisc_free_cb);
1092}
1093
1094void qdisc_destroy(struct Qdisc *qdisc)
1095{
1096	if (qdisc->flags & TCQ_F_BUILTIN)
1097		return;
1098
1099	__qdisc_destroy(qdisc);
1100}
1101
1102void qdisc_put(struct Qdisc *qdisc)
1103{
1104	if (!qdisc)
1105		return;
1106
1107	if (qdisc->flags & TCQ_F_BUILTIN ||
1108	    !refcount_dec_and_test(&qdisc->refcnt))
1109		return;
1110
1111	__qdisc_destroy(qdisc);
1112}
1113EXPORT_SYMBOL(qdisc_put);
1114
1115/* Version of qdisc_put() that is called with rtnl mutex unlocked.
1116 * Intended to be used as optimization, this function only takes rtnl lock if
1117 * qdisc reference counter reached zero.
1118 */
1119
1120void qdisc_put_unlocked(struct Qdisc *qdisc)
1121{
1122	if (qdisc->flags & TCQ_F_BUILTIN ||
1123	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1124		return;
1125
1126	__qdisc_destroy(qdisc);
1127	rtnl_unlock();
1128}
1129EXPORT_SYMBOL(qdisc_put_unlocked);
1130
1131/* Attach toplevel qdisc to device queue. */
1132struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1133			      struct Qdisc *qdisc)
1134{
1135	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1136	spinlock_t *root_lock;
1137
1138	root_lock = qdisc_lock(oqdisc);
1139	spin_lock_bh(root_lock);
1140
1141	/* ... and graft new one */
1142	if (qdisc == NULL)
1143		qdisc = &noop_qdisc;
1144	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1145	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1146
1147	spin_unlock_bh(root_lock);
1148
1149	return oqdisc;
1150}
1151EXPORT_SYMBOL(dev_graft_qdisc);
1152
1153static void shutdown_scheduler_queue(struct net_device *dev,
1154				     struct netdev_queue *dev_queue,
1155				     void *_qdisc_default)
1156{
1157	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1158	struct Qdisc *qdisc_default = _qdisc_default;
1159
1160	if (qdisc) {
1161		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1162		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
1163
1164		qdisc_put(qdisc);
1165	}
1166}
1167
1168static void attach_one_default_qdisc(struct net_device *dev,
1169				     struct netdev_queue *dev_queue,
1170				     void *_unused)
1171{
1172	struct Qdisc *qdisc;
1173	const struct Qdisc_ops *ops = default_qdisc_ops;
1174
1175	if (dev->priv_flags & IFF_NO_QUEUE)
1176		ops = &noqueue_qdisc_ops;
 1177	else if (dev->type == ARPHRD_CAN)
1178		ops = &pfifo_fast_ops;
1179
1180	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1181	if (!qdisc)
1182		return;
1183
1184	if (!netif_is_multiqueue(dev))
1185		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1186	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1187}
1188
1189static void attach_default_qdiscs(struct net_device *dev)
1190{
1191	struct netdev_queue *txq;
1192	struct Qdisc *qdisc;
1193
1194	txq = netdev_get_tx_queue(dev, 0);
1195
1196	if (!netif_is_multiqueue(dev) ||
1197	    dev->priv_flags & IFF_NO_QUEUE) {
1198		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1199		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1200		rcu_assign_pointer(dev->qdisc, qdisc);
1201		qdisc_refcount_inc(qdisc);
1202	} else {
1203		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1204		if (qdisc) {
1205			rcu_assign_pointer(dev->qdisc, qdisc);
1206			qdisc->ops->attach(qdisc);
1207		}
1208	}
1209	qdisc = rtnl_dereference(dev->qdisc);
1210
 1211	/* Detect that default qdisc setup/init failed and fall back to "noqueue" */
1212	if (qdisc == &noop_qdisc) {
1213		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1214			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
1215		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1216		dev->priv_flags |= IFF_NO_QUEUE;
1217		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1218		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1219		rcu_assign_pointer(dev->qdisc, qdisc);
1220		qdisc_refcount_inc(qdisc);
1221		dev->priv_flags ^= IFF_NO_QUEUE;
1222	}
1223
1224#ifdef CONFIG_NET_SCHED
1225	if (qdisc != &noop_qdisc)
1226		qdisc_hash_add(qdisc, false);
1227#endif
1228}
1229
1230static void transition_one_qdisc(struct net_device *dev,
1231				 struct netdev_queue *dev_queue,
1232				 void *_need_watchdog)
1233{
1234	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1235	int *need_watchdog_p = _need_watchdog;
1236
1237	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1238		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1239
1240	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1241	if (need_watchdog_p) {
1242		WRITE_ONCE(dev_queue->trans_start, 0);
1243		*need_watchdog_p = 1;
1244	}
1245}
1246
1247void dev_activate(struct net_device *dev)
1248{
1249	int need_watchdog;
1250
 1251	/* No queueing discipline is attached to the device;
 1252	 * create a default one for devices which need queueing,
 1253	 * and noqueue_qdisc for virtual interfaces.
1254	 */
1255
1256	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1257		attach_default_qdiscs(dev);
1258
1259	if (!netif_carrier_ok(dev))
1260		/* Delay activation until next carrier-on event */
1261		return;
1262
1263	need_watchdog = 0;
1264	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1265	if (dev_ingress_queue(dev))
1266		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1267
1268	if (need_watchdog) {
1269		netif_trans_update(dev);
1270		dev_watchdog_up(dev);
1271	}
1272}
1273EXPORT_SYMBOL(dev_activate);
1274
1275static void qdisc_deactivate(struct Qdisc *qdisc)
1276{
1277	if (qdisc->flags & TCQ_F_BUILTIN)
1278		return;
1279
1280	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1281}
1282
1283static void dev_deactivate_queue(struct net_device *dev,
1284				 struct netdev_queue *dev_queue,
1285				 void *_qdisc_default)
1286{
1287	struct Qdisc *qdisc_default = _qdisc_default;
1288	struct Qdisc *qdisc;
1289
1290	qdisc = rtnl_dereference(dev_queue->qdisc);
1291	if (qdisc) {
1292		qdisc_deactivate(qdisc);
1293		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1294	}
1295}
1296
1297static void dev_reset_queue(struct net_device *dev,
1298			    struct netdev_queue *dev_queue,
1299			    void *_unused)
1300{
1301	struct Qdisc *qdisc;
1302	bool nolock;
1303
1304	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1305	if (!qdisc)
1306		return;
1307
1308	nolock = qdisc->flags & TCQ_F_NOLOCK;
1309
1310	if (nolock)
1311		spin_lock_bh(&qdisc->seqlock);
1312	spin_lock_bh(qdisc_lock(qdisc));
1313
1314	qdisc_reset(qdisc);
1315
1316	spin_unlock_bh(qdisc_lock(qdisc));
1317	if (nolock) {
1318		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1319		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1320		spin_unlock_bh(&qdisc->seqlock);
1321	}
1322}
1323
1324static bool some_qdisc_is_busy(struct net_device *dev)
1325{
1326	unsigned int i;
1327
1328	for (i = 0; i < dev->num_tx_queues; i++) {
1329		struct netdev_queue *dev_queue;
1330		spinlock_t *root_lock;
1331		struct Qdisc *q;
1332		int val;
1333
1334		dev_queue = netdev_get_tx_queue(dev, i);
1335		q = rtnl_dereference(dev_queue->qdisc_sleeping);
1336
1337		root_lock = qdisc_lock(q);
1338		spin_lock_bh(root_lock);
1339
1340		val = (qdisc_is_running(q) ||
1341		       test_bit(__QDISC_STATE_SCHED, &q->state));
1342
1343		spin_unlock_bh(root_lock);
1344
1345		if (val)
1346			return true;
1347	}
1348	return false;
1349}
1350
1351/**
1352 * 	dev_deactivate_many - deactivate transmissions on several devices
1353 * 	@head: list of devices to deactivate
1354 *
1355 *	This function returns only when all outstanding transmissions
1356 *	have completed, unless all devices are in dismantle phase.
1357 */
1358void dev_deactivate_many(struct list_head *head)
1359{
1360	struct net_device *dev;
1361
1362	list_for_each_entry(dev, head, close_list) {
1363		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1364					 &noop_qdisc);
1365		if (dev_ingress_queue(dev))
1366			dev_deactivate_queue(dev, dev_ingress_queue(dev),
1367					     &noop_qdisc);
1368
1369		dev_watchdog_down(dev);
1370	}
1371
1372	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
1373	 * outstanding qdisc enqueuing calls.
 1374	 * This is avoided if all devices are in dismantle phase:
 1375	 * the caller will call synchronize_net() for us.
1376	 */
1377	synchronize_net();
1378
1379	list_for_each_entry(dev, head, close_list) {
1380		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1381
1382		if (dev_ingress_queue(dev))
1383			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1384	}
1385
1386	/* Wait for outstanding qdisc_run calls. */
1387	list_for_each_entry(dev, head, close_list) {
1388		while (some_qdisc_is_busy(dev)) {
1389			/* wait_event() would avoid this sleep-loop but would
1390			 * require expensive checks in the fast paths of packet
1391			 * processing which isn't worth it.
1392			 */
1393			schedule_timeout_uninterruptible(1);
1394		}
1395	}
1396}
1397
1398void dev_deactivate(struct net_device *dev)
1399{
1400	LIST_HEAD(single);
1401
1402	list_add(&dev->close_list, &single);
1403	dev_deactivate_many(&single);
1404	list_del(&single);
1405}
1406EXPORT_SYMBOL(dev_deactivate);
1407
1408static int qdisc_change_tx_queue_len(struct net_device *dev,
1409				     struct netdev_queue *dev_queue)
1410{
1411	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1412	const struct Qdisc_ops *ops = qdisc->ops;
1413
1414	if (ops->change_tx_queue_len)
1415		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1416	return 0;
1417}
1418
1419void dev_qdisc_change_real_num_tx(struct net_device *dev,
1420				  unsigned int new_real_tx)
1421{
1422	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1423
1424	if (qdisc->ops->change_real_num_tx)
1425		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1426}
1427
1428void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1429{
1430#ifdef CONFIG_NET_SCHED
1431	struct net_device *dev = qdisc_dev(sch);
1432	struct Qdisc *qdisc;
1433	unsigned int i;
1434
1435	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1436		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
 1437		/* Only update the default qdiscs we created;
1438		 * qdiscs with handles are always hashed.
1439		 */
1440		if (qdisc != &noop_qdisc && !qdisc->handle)
1441			qdisc_hash_del(qdisc);
1442	}
1443	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1444		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1445		if (qdisc != &noop_qdisc && !qdisc->handle)
1446			qdisc_hash_add(qdisc, false);
1447	}
1448#endif
1449}
1450EXPORT_SYMBOL(mq_change_real_num_tx);
1451
1452int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1453{
1454	bool up = dev->flags & IFF_UP;
1455	unsigned int i;
1456	int ret = 0;
1457
1458	if (up)
1459		dev_deactivate(dev);
1460
1461	for (i = 0; i < dev->num_tx_queues; i++) {
1462		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1463
1464		/* TODO: revert changes on a partial failure */
1465		if (ret)
1466			break;
1467	}
1468
1469	if (up)
1470		dev_activate(dev);
1471	return ret;
1472}
1473
1474static void dev_init_scheduler_queue(struct net_device *dev,
1475				     struct netdev_queue *dev_queue,
1476				     void *_qdisc)
1477{
1478	struct Qdisc *qdisc = _qdisc;
1479
1480	rcu_assign_pointer(dev_queue->qdisc, qdisc);
1481	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1482}
1483
1484void dev_init_scheduler(struct net_device *dev)
1485{
1486	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1487	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1488	if (dev_ingress_queue(dev))
1489		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1490
1491	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1492}
1493
1494void dev_shutdown(struct net_device *dev)
1495{
1496	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1497	if (dev_ingress_queue(dev))
1498		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1499	qdisc_put(rtnl_dereference(dev->qdisc));
1500	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1501
1502	WARN_ON(timer_pending(&dev->watchdog_timer));
1503}
1504
1505/**
1506 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1507 * @rate:   Rate to compute reciprocal division values of
1508 * @mult:   Multiplier for reciprocal division
1509 * @shift:  Shift for reciprocal division
1510 *
1511 * The multiplier and shift for reciprocal division by rate are stored
1512 * in mult and shift.
1513 *
 1514 * The deal here is to replace a divide by a reciprocal one
 1515 * in the fast path (a reciprocal divide is a multiply and a shift)
 1516 *
 1517 * Normal formula would be:
 1518 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
 1519 *
 1520 * We compute mult/shift to use instead:
1521 *  time_in_ns = (len * mult) >> shift;
1522 *
1523 * We try to get the highest possible mult value for accuracy,
1524 * but have to make sure no overflows will ever happen.
1525 *
 1526 * reciprocal_value() is not used here because it doesn't handle 64-bit values.
1527 */
1528static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1529{
1530	u64 factor = NSEC_PER_SEC;
1531
1532	*mult = 1;
1533	*shift = 0;
1534
1535	if (rate <= 0)
1536		return;
1537
1538	for (;;) {
1539		*mult = div64_u64(factor, rate);
1540		if (*mult & (1U << 31) || factor & (1ULL << 63))
1541			break;
1542		factor <<= 1;
1543		(*shift)++;
1544	}
1545}
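
/* Worked example, not part of the original source, for rate = 125,000,000
 * bytes/s (1 Gbit/s):
 *
 *	exact:       time_in_ns = NSEC_PER_SEC * len / rate = 8 * len
 *	precomputed: mult = 2^31, shift = 28
 *	fast path:   time_in_ns = (len * 2^31) >> 28 = 8 * len
 *
 * so a 1500 byte packet is charged 12000 ns, i.e. 12000 bits at 1 Gbit/s.
 */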
1546
1547void psched_ratecfg_precompute(struct psched_ratecfg *r,
1548			       const struct tc_ratespec *conf,
1549			       u64 rate64)
1550{
1551	memset(r, 0, sizeof(*r));
1552	r->overhead = conf->overhead;
1553	r->mpu = conf->mpu;
1554	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1555	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1556	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1557}
1558EXPORT_SYMBOL(psched_ratecfg_precompute);
1559
1560void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1561{
1562	r->rate_pkts_ps = pktrate64;
1563	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1564}
1565EXPORT_SYMBOL(psched_ppscfg_precompute);
1566
1567void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1568			  struct tcf_proto *tp_head)
1569{
1570	/* Protected with chain0->filter_chain_lock.
1571	 * Can't access chain directly because tp_head can be NULL.
1572	 */
1573	struct mini_Qdisc *miniq_old =
1574		rcu_dereference_protected(*miniqp->p_miniq, 1);
1575	struct mini_Qdisc *miniq;
1576
1577	if (!tp_head) {
1578		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1579	} else {
1580		miniq = miniq_old != &miniqp->miniq1 ?
1581			&miniqp->miniq1 : &miniqp->miniq2;
1582
1583		/* We need to make sure that readers won't see the miniq
1584		 * we are about to modify. So ensure that at least one RCU
1585		 * grace period has elapsed since the miniq was made
1586		 * inactive.
1587		 */
1588		if (IS_ENABLED(CONFIG_PREEMPT_RT))
1589			cond_synchronize_rcu(miniq->rcu_state);
1590		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1591			synchronize_rcu_expedited();
1592
1593		miniq->filter_list = tp_head;
1594		rcu_assign_pointer(*miniqp->p_miniq, miniq);
1595	}
1596
1597	if (miniq_old)
 1598		/* This is the counterpart of the RCU sync above. We need to
 1599		 * block a potential new user of miniq_old until all readers
 1600		 * have stopped seeing it.
1601		 */
1602		miniq_old->rcu_state = start_poll_synchronize_rcu();
1603}
1604EXPORT_SYMBOL(mini_qdisc_pair_swap);
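
/* Editor's note, not part of the original source: miniq1 and miniq2 act as a
 * ping-pong pair. Each swap publishes the currently inactive buffer with the
 * new tp_head and records an RCU cookie on the buffer being retired, so the
 * next swap only reuses that buffer once no reader can still see it.
 */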
1605
1606void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1607				struct tcf_block *block)
1608{
1609	miniqp->miniq1.block = block;
1610	miniqp->miniq2.block = block;
1611}
1612EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1613
1614void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1615			  struct mini_Qdisc __rcu **p_miniq)
1616{
1617	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1618	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1619	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1620	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1621	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1622	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1623	miniqp->p_miniq = p_miniq;
1624}
1625EXPORT_SYMBOL(mini_qdisc_pair_init);