v6.2 (kernel/softirq.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/kernel/softirq.c
   4 *
   5 *	Copyright (C) 1992 Linus Torvalds
   6 *
   7 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/export.h>
  13#include <linux/kernel_stat.h>
  14#include <linux/interrupt.h>
  15#include <linux/init.h>
  16#include <linux/local_lock.h>
  17#include <linux/mm.h>
  18#include <linux/notifier.h>
  19#include <linux/percpu.h>
  20#include <linux/cpu.h>
  21#include <linux/freezer.h>
  22#include <linux/kthread.h>
  23#include <linux/rcupdate.h>
  24#include <linux/ftrace.h>
  25#include <linux/smp.h>
  26#include <linux/smpboot.h>
  27#include <linux/tick.h>
  28#include <linux/irq.h>
  29#include <linux/wait_bit.h>
  30
  31#include <asm/softirq_stack.h>
  32
  33#define CREATE_TRACE_POINTS
  34#include <trace/events/irq.h>
  35
  36/*
  37   - No shared variables, all the data are CPU local.
  38   - If a softirq needs serialization, let it serialize itself
  39     by its own spinlocks.
  40   - Even if softirq is serialized, only local cpu is marked for
  41     execution. Hence, we get a sort of weak cpu binding.
  42     Though it is still not clear whether it will result in better
  43     locality or not.
  44
  45   Examples:
  46   - NET RX softirq. It is multithreaded and does not require
  47     any global serialization.
  48   - NET TX softirq. It kicks software netdevice queues, hence
  49     it is logically serialized per device, but this serialization
  50     is invisible to common code.
  51   - Tasklets: serialized wrt itself.
  52 */
  53
  54#ifndef __ARCH_IRQ_STAT
  55DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
  56EXPORT_PER_CPU_SYMBOL(irq_stat);
  57#endif
  58
  59static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
  60
  61DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
  62
  63const char * const softirq_to_name[NR_SOFTIRQS] = {
  64	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
  65	"TASKLET", "SCHED", "HRTIMER", "RCU"
  66};
  67
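/*
 * Editor's note: this table also provides the row labels that
 * fs/proc/softirqs.c prints in /proc/softirqs, so the names above are
 * visible to userspace.
 */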
  68/*
  69 * We cannot loop indefinitely here to avoid userspace starvation,
  70 * but we also don't want to introduce a worst case 1/HZ latency
  71 * to the pending events, so let the scheduler balance
  72 * the softirq load for us.
  73 */
  74static void wakeup_softirqd(void)
  75{
  76	/* Interrupts are disabled: no need to stop preemption */
  77	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
  78
  79	if (tsk)
  80		wake_up_process(tsk);
  81}
  82
  83/*
  84 * If ksoftirqd is scheduled, we do not want to process pending softirqs
  85 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
  86 * unless we're doing some of the synchronous softirqs.
  87 */
  88#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
  89static bool ksoftirqd_running(unsigned long pending)
  90{
  91	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
  92
  93	if (pending & SOFTIRQ_NOW_MASK)
  94		return false;
  95	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
  96}
  97
  98#ifdef CONFIG_TRACE_IRQFLAGS
  99DEFINE_PER_CPU(int, hardirqs_enabled);
 100DEFINE_PER_CPU(int, hardirq_context);
 101EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
 102EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
 103#endif
 104
 105/*
 106 * SOFTIRQ_OFFSET usage:
 107 *
 108 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 109 * to a per CPU counter and to task::softirq_disable_cnt.
 110 *
 111 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 112 *   processing.
 113 *
 114 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 115 *   on local_bh_disable or local_bh_enable.
 116 *
 117 * This lets us distinguish between whether we are currently processing
 118 * softirq and whether we just have bh disabled.
 119 */
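/*
 * Editor's illustration (not part of the kernel source): with the two
 * offsets described above, the softirq part of the counter decodes as
 * follows. softirq_count() and SOFTIRQ_OFFSET are the real macros from
 * <linux/preempt.h>; the helper names are hypothetical.
 */
static inline bool sketch_serving_softirq(void)
{
	/* the SOFTIRQ_OFFSET bit is only set while handlers are running */
	return softirq_count() & SOFTIRQ_OFFSET;
}

static inline bool sketch_bh_disabled(void)
{
	/* any non-zero softirq count means bottom halves are off */
	return softirq_count() != 0;
}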
 120#ifdef CONFIG_PREEMPT_RT
 121
 122/*
 123 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 124 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 125 * softirq disabled section to be preempted.
 126 *
 127 * The per task counter is used for softirq_count(), in_softirq() and
 128 * in_serving_softirq() because these counts are only valid when the task
 129 * holding softirq_ctrl::lock is running.
 130 *
 131 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 132 * the task which is in a softirq disabled section is preempted or blocks.
 133 */
 134struct softirq_ctrl {
 135	local_lock_t	lock;
 136	int		cnt;
 137};
 138
 139static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
 140	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
 141};
 142
 143/**
 144 * local_bh_blocked() - Check from idle whether BH processing is blocked
 145 *
 146 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 147 *
 148 * This is invoked from the idle task to guard against false positive
 149 * softirq pending warnings, which would happen when the task which holds
 150 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 151 * some other lock.
 152 */
 153bool local_bh_blocked(void)
 154{
 155	return __this_cpu_read(softirq_ctrl.cnt) != 0;
 156}
 157
 158void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 159{
 160	unsigned long flags;
 161	int newcnt;
 162
 163	WARN_ON_ONCE(in_hardirq());
 164
 165	/* First entry of a task into a BH disabled section? */
 166	if (!current->softirq_disable_cnt) {
 167		if (preemptible()) {
 168			local_lock(&softirq_ctrl.lock);
 169			/* Required to meet the RCU bottomhalf requirements. */
 170			rcu_read_lock();
 171		} else {
 172			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
 173		}
 174	}
 175
 176	/*
 177	 * Track the per CPU softirq disabled state. On RT this is per CPU
 178	 * state to allow preemption of bottom half disabled sections.
 179	 */
 180	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
 181	/*
 182	 * Reflect the result in the task state to prevent recursion on the
 183	 * local lock and to make softirq_count() & al work.
 184	 */
 185	current->softirq_disable_cnt = newcnt;
 186
 187	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
 188		raw_local_irq_save(flags);
 189		lockdep_softirqs_off(ip);
 190		raw_local_irq_restore(flags);
 191	}
 192}
 193EXPORT_SYMBOL(__local_bh_disable_ip);
 194
 195static void __local_bh_enable(unsigned int cnt, bool unlock)
 196{
 197	unsigned long flags;
 198	int newcnt;
 199
 200	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
 201			    this_cpu_read(softirq_ctrl.cnt));
 202
 203	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
 204		raw_local_irq_save(flags);
 205		lockdep_softirqs_on(_RET_IP_);
 206		raw_local_irq_restore(flags);
 207	}
 208
 209	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
 210	current->softirq_disable_cnt = newcnt;
 211
 212	if (!newcnt && unlock) {
 213		rcu_read_unlock();
 214		local_unlock(&softirq_ctrl.lock);
 215	}
 216}
 217
 218void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 219{
 220	bool preempt_on = preemptible();
 221	unsigned long flags;
 222	u32 pending;
 223	int curcnt;
 224
 225	WARN_ON_ONCE(in_hardirq());
 226	lockdep_assert_irqs_enabled();
 227
 228	local_irq_save(flags);
 229	curcnt = __this_cpu_read(softirq_ctrl.cnt);
 230
 231	/*
 232	 * If this is not reenabling soft interrupts, no point in trying to
 233	 * run pending ones.
 234	 */
 235	if (curcnt != cnt)
 236		goto out;
 237
 238	pending = local_softirq_pending();
 239	if (!pending || ksoftirqd_running(pending))
 240		goto out;
 241
 242	/*
 243	 * If this was called from non preemptible context, wake up the
 244	 * softirq daemon.
 245	 */
 246	if (!preempt_on) {
 247		wakeup_softirqd();
 248		goto out;
 249	}
 250
 251	/*
 252	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
 253	 * in_serving_softirq() become true.
 254	 */
 255	cnt = SOFTIRQ_OFFSET;
 256	__local_bh_enable(cnt, false);
 257	__do_softirq();
 258
 259out:
 260	__local_bh_enable(cnt, preempt_on);
 261	local_irq_restore(flags);
 262}
 263EXPORT_SYMBOL(__local_bh_enable_ip);
 264
 265/*
 266 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 267 * to acquire the per CPU local lock for reentrancy protection.
 268 */
 269static inline void ksoftirqd_run_begin(void)
 270{
 271	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 272	local_irq_disable();
 273}
 274
 275/* Counterpart to ksoftirqd_run_begin() */
 276static inline void ksoftirqd_run_end(void)
 277{
 278	__local_bh_enable(SOFTIRQ_OFFSET, true);
 279	WARN_ON_ONCE(in_interrupt());
 280	local_irq_enable();
 281}
 282
 283static inline void softirq_handle_begin(void) { }
 284static inline void softirq_handle_end(void) { }
 285
 286static inline bool should_wake_ksoftirqd(void)
 287{
 288	return !this_cpu_read(softirq_ctrl.cnt);
 289}
 290
 291static inline void invoke_softirq(void)
 292{
 293	if (should_wake_ksoftirqd())
 294		wakeup_softirqd();
 295}
 296
 297/*
 298 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 299 * call. On RT kernels this is undesired and the only known functionality
 300 * in the block layer which does this is disabled on RT. If soft interrupts
 301 * get raised which haven't been raised before the flush, warn so it can be
 302 * investigated.
 303 */
 304void do_softirq_post_smp_call_flush(unsigned int was_pending)
 305{
 306	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
 307		invoke_softirq();
 308}
 309
 310#else /* CONFIG_PREEMPT_RT */
 311
 312/*
 313 * This one is for softirq.c-internal use, where hardirqs are disabled
 314 * legitimately:
 315 */
 316#ifdef CONFIG_TRACE_IRQFLAGS
 317void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 318{
 319	unsigned long flags;
 320
 321	WARN_ON_ONCE(in_hardirq());
 322
 323	raw_local_irq_save(flags);
 324	/*
 325	 * The preempt tracer hooks into preempt_count_add and will break
 326	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
 327	 * is set and before current->softirq_enabled is cleared.
 328	 * We must manually increment preempt_count here and manually
 329	 * call the trace_preempt_off later.
 330	 */
 331	__preempt_count_add(cnt);
 332	/*
 333	 * Were softirqs turned off above:
 334	 */
 335	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 336		lockdep_softirqs_off(ip);
 337	raw_local_irq_restore(flags);
 338
 339	if (preempt_count() == cnt) {
 340#ifdef CONFIG_DEBUG_PREEMPT
 341		current->preempt_disable_ip = get_lock_parent_ip();
 342#endif
 343		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 344	}
 345}
 346EXPORT_SYMBOL(__local_bh_disable_ip);
 347#endif /* CONFIG_TRACE_IRQFLAGS */
 348
 349static void __local_bh_enable(unsigned int cnt)
 350{
 351	lockdep_assert_irqs_disabled();
 352
 353	if (preempt_count() == cnt)
 354		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 355
 356	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 357		lockdep_softirqs_on(_RET_IP_);
 358
 359	__preempt_count_sub(cnt);
 360}
 361
 362/*
 363 * Special-case - softirqs can safely be enabled by __do_softirq(),
 364 * without processing still-pending softirqs:
 365 */
 366void _local_bh_enable(void)
 367{
 368	WARN_ON_ONCE(in_hardirq());
 369	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 370}
 371EXPORT_SYMBOL(_local_bh_enable);
 372
 373void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 374{
 375	WARN_ON_ONCE(in_hardirq());
 376	lockdep_assert_irqs_enabled();
 377#ifdef CONFIG_TRACE_IRQFLAGS
 378	local_irq_disable();
 379#endif
 380	/*
 381	 * Are softirqs going to be turned on now:
 382	 */
 383	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
 384		lockdep_softirqs_on(ip);
 385	/*
 386	 * Keep preemption disabled until we are done with
 387	 * softirq processing:
 388	 */
 389	__preempt_count_sub(cnt - 1);
 390
 391	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 392		/*
 393		 * Run softirq if any pending. And do it in its own stack
 394		 * as we may be calling this deep in a task call stack already.
 395		 */
 396		do_softirq();
 397	}
 398
 399	preempt_count_dec();
 400#ifdef CONFIG_TRACE_IRQFLAGS
 401	local_irq_enable();
 402#endif
 403	preempt_check_resched();
 404}
 405EXPORT_SYMBOL(__local_bh_enable_ip);
 406
 407static inline void softirq_handle_begin(void)
 408{
 409	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 410}
 411
 412static inline void softirq_handle_end(void)
 413{
 414	__local_bh_enable(SOFTIRQ_OFFSET);
 415	WARN_ON_ONCE(in_interrupt());
 416}
 417
 418static inline void ksoftirqd_run_begin(void)
 419{
 420	local_irq_disable();
 421}
 422
 423static inline void ksoftirqd_run_end(void)
 424{
 425	local_irq_enable();
 426}
 427
 428static inline bool should_wake_ksoftirqd(void)
 429{
 430	return true;
 431}
 432
 433static inline void invoke_softirq(void)
 434{
 435	if (ksoftirqd_running(local_softirq_pending()))
 436		return;
 437
 438	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 439#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 440		/*
 441		 * We can safely execute softirq on the current stack if
 442		 * it is the irq stack, because it should be near empty
 443		 * at this stage.
 444		 */
 445		__do_softirq();
 446#else
 447		/*
 448		 * Otherwise, irq_exit() is called on the task stack that can
 449		 * be potentially deep already. So call softirq in its own stack
 450		 * to prevent from any overrun.
 451		 */
 452		do_softirq_own_stack();
 453#endif
 454	} else {
 455		wakeup_softirqd();
 456	}
 457}
 458
 459asmlinkage __visible void do_softirq(void)
 460{
 461	__u32 pending;
 462	unsigned long flags;
 463
 464	if (in_interrupt())
 465		return;
 466
 467	local_irq_save(flags);
 468
 469	pending = local_softirq_pending();
 470
 471	if (pending && !ksoftirqd_running(pending))
 472		do_softirq_own_stack();
 473
 474	local_irq_restore(flags);
 475}
 476
 477#endif /* !CONFIG_PREEMPT_RT */
 478
 479/*
 480 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 481 * but break the loop if need_resched() is set or after 2 ms.
 482 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 483 * certain cases, such as stop_machine(), jiffies may cease to
 484 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 485 * well to make sure we eventually return from this method.
 486 *
 487 * These limits have been established via experimentation.
 488 * The two things to balance are latency and fairness -
 489 * we want to handle softirqs as soon as possible, but they
 490 * should not be able to lock up the box.
 491 */
 492#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
 493#define MAX_SOFTIRQ_RESTART 10
 494
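/*
 * Editor's worked example: with HZ=1000 the 2ms budget above is 2 jiffies;
 * with HZ=100 (10ms per tick) msecs_to_jiffies(2) rounds up to 1 jiffy.
 * Either way __do_softirq() also bails out after 10 restarts, or as soon
 * as need_resched() becomes true, whichever limit is hit first.
 */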
 495#ifdef CONFIG_TRACE_IRQFLAGS
 496/*
 497 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 498 * to keep the lockdep irq context tracking as tight as possible in order to
 499 * not mis-qualify lock contexts and miss possible deadlocks.
 500 */
 501
 502static inline bool lockdep_softirq_start(void)
 503{
 504	bool in_hardirq = false;
 505
 506	if (lockdep_hardirq_context()) {
 507		in_hardirq = true;
 508		lockdep_hardirq_exit();
 509	}
 510
 511	lockdep_softirq_enter();
 512
 513	return in_hardirq;
 514}
 515
 516static inline void lockdep_softirq_end(bool in_hardirq)
 517{
 518	lockdep_softirq_exit();
 519
 520	if (in_hardirq)
 521		lockdep_hardirq_enter();
 522}
 523#else
 524static inline bool lockdep_softirq_start(void) { return false; }
 525static inline void lockdep_softirq_end(bool in_hardirq) { }
 526#endif
 527
 528asmlinkage __visible void __softirq_entry __do_softirq(void)
 529{
 530	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 531	unsigned long old_flags = current->flags;
 532	int max_restart = MAX_SOFTIRQ_RESTART;
 533	struct softirq_action *h;
 534	bool in_hardirq;
 535	__u32 pending;
 536	int softirq_bit;
 537
 538	/*
 539	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
 540	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
 541	 * again if the socket is related to swapping.
 542	 */
 543	current->flags &= ~PF_MEMALLOC;
 544
 545	pending = local_softirq_pending();
 546
 547	softirq_handle_begin();
 548	in_hardirq = lockdep_softirq_start();
 549	account_softirq_enter(current);
 550
 551restart:
 552	/* Reset the pending bitmask before enabling irqs */
 553	set_softirq_pending(0);
 554
 555	local_irq_enable();
 556
 557	h = softirq_vec;
 558
 559	while ((softirq_bit = ffs(pending))) {
 560		unsigned int vec_nr;
 561		int prev_count;
 562
 563		h += softirq_bit - 1;
 564
 565		vec_nr = h - softirq_vec;
 566		prev_count = preempt_count();
 567
 568		kstat_incr_softirqs_this_cpu(vec_nr);
 569
 570		trace_softirq_entry(vec_nr);
 571		h->action(h);
 572		trace_softirq_exit(vec_nr);
 573		if (unlikely(prev_count != preempt_count())) {
 574			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
 575			       vec_nr, softirq_to_name[vec_nr], h->action,
 576			       prev_count, preempt_count());
 577			preempt_count_set(prev_count);
 578		}
 579		h++;
 580		pending >>= softirq_bit;
 581	}
 582
 583	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
 584	    __this_cpu_read(ksoftirqd) == current)
 585		rcu_softirq_qs();
 586
 587	local_irq_disable();
 588
 589	pending = local_softirq_pending();
 590	if (pending) {
 591		if (time_before(jiffies, end) && !need_resched() &&
 592		    --max_restart)
 593			goto restart;
 594
 595		wakeup_softirqd();
 596	}
 597
 598	account_softirq_exit(current);
 599	lockdep_softirq_end(in_hardirq);
 600	softirq_handle_end();
 601	current_restore_flags(old_flags, PF_MEMALLOC);
 602}
 603
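/*
 * Editor's illustration of the ffs() walk in the handler loop above: set
 * bits of 'pending' are consumed in ascending vector order. A stand-alone
 * user-space sketch of the same pattern (names hypothetical):
 */
#include <strings.h>	/* ffs() */

static void walk_pending(unsigned int pending)
{
	unsigned int vec = 0;
	int bit;

	while ((bit = ffs(pending))) {
		vec += bit - 1;		/* index of the next set bit */
		/* ... handle softirq vector 'vec' here ... */
		vec++;			/* step past the handled bit */
		pending >>= bit;
	}
}
/* e.g. pending = 0x0a (TIMER|NET_RX) visits vector 1, then vector 3. */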
 604/**
 605 * irq_enter_rcu - Enter an interrupt context with RCU watching
 606 */
 607void irq_enter_rcu(void)
 608{
 609	__irq_enter_raw();
 610
 611	if (tick_nohz_full_cpu(smp_processor_id()) ||
 612	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
 613		tick_irq_enter();
 614
 615	account_hardirq_enter(current);
 616}
 617
 618/**
 619 * irq_enter - Enter an interrupt context including RCU update
 620 */
 621void irq_enter(void)
 622{
 623	ct_irq_enter();
 624	irq_enter_rcu();
 625}
 626
 627static inline void tick_irq_exit(void)
 628{
 629#ifdef CONFIG_NO_HZ_COMMON
 630	int cpu = smp_processor_id();
 631
 632	/* Make sure that timer wheel updates are propagated */
 633	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
 634		if (!in_hardirq())
 635			tick_nohz_irq_exit();
 636	}
 637#endif
 638}
 639
 640static inline void __irq_exit_rcu(void)
 641{
 642#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
 643	local_irq_disable();
 644#else
 645	lockdep_assert_irqs_disabled();
 646#endif
 647	account_hardirq_exit(current);
 648	preempt_count_sub(HARDIRQ_OFFSET);
 649	if (!in_interrupt() && local_softirq_pending())
 650		invoke_softirq();
 651
 652	tick_irq_exit();
 653}
 654
 655/**
 656 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 657 *
 658 * Also processes softirqs if needed and possible.
 659 */
 660void irq_exit_rcu(void)
 661{
 662	__irq_exit_rcu();
 663	 /* must be last! */
 664	lockdep_hardirq_exit();
 665}
 666
 667/**
 668 * irq_exit - Exit an interrupt context, update RCU and lockdep
 669 *
 670 * Also processes softirqs if needed and possible.
 671 */
 672void irq_exit(void)
 673{
 674	__irq_exit_rcu();
 675	ct_irq_exit();
 676	 /* must be last! */
 677	lockdep_hardirq_exit();
 678}
 679
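/*
 * Editor's sketch (hypothetical arch glue): a typical low-level interrupt
 * path brackets the hardirq handler with the pair above, which is what
 * gives invoke_softirq() its chance to run on the way out:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// hardirq handler runs here
 *	irq_exit();			// may process pending softirqs
 */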
 680/*
 681 * This function must run with irqs disabled!
 682 */
 683inline void raise_softirq_irqoff(unsigned int nr)
 684{
 685	__raise_softirq_irqoff(nr);
 686
 687	/*
 688	 * If we're in an interrupt or softirq, we're done
 689	 * (this also catches softirq-disabled code). We will
 690	 * actually run the softirq once we return from
 691	 * the irq or softirq.
 692	 *
 693	 * Otherwise we wake up ksoftirqd to make sure we
 694	 * schedule the softirq soon.
 695	 */
 696	if (!in_interrupt() && should_wake_ksoftirqd())
 697		wakeup_softirqd();
 698}
 699
 700void raise_softirq(unsigned int nr)
 701{
 702	unsigned long flags;
 703
 704	local_irq_save(flags);
 705	raise_softirq_irqoff(nr);
 706	local_irq_restore(flags);
 707}
 708
 709void __raise_softirq_irqoff(unsigned int nr)
 710{
 711	lockdep_assert_irqs_disabled();
 712	trace_softirq_raise(nr);
 713	or_softirq_pending(1UL << nr);
 714}
 715
 716void open_softirq(int nr, void (*action)(struct softirq_action *))
 717{
 718	softirq_vec[nr].action = action;
 719}
 720
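/*
 * Editor's example (EXAMPLE_SOFTIRQ is hypothetical; adding new softirq
 * vectors is discouraged upstream, tasklets or threaded IRQs are usually
 * preferred): wiring a handler into softirq_vec and triggering it.
 */
static void example_softirq_action(struct softirq_action *h)
{
	/* softirq context: interrupts on, preemption off, must not sleep */
}

/*
 * boot code:			open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
 * hardirq handler:		raise_softirq(EXAMPLE_SOFTIRQ);
 * with interrupts already off:	raise_softirq_irqoff(EXAMPLE_SOFTIRQ);
 */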
 721/*
 722 * Tasklets
 723 */
 724struct tasklet_head {
 725	struct tasklet_struct *head;
 726	struct tasklet_struct **tail;
 727};
 728
 729static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 730static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 731
 732static void __tasklet_schedule_common(struct tasklet_struct *t,
 733				      struct tasklet_head __percpu *headp,
 734				      unsigned int softirq_nr)
 735{
 736	struct tasklet_head *head;
 737	unsigned long flags;
 738
 739	local_irq_save(flags);
 740	head = this_cpu_ptr(headp);
 741	t->next = NULL;
 742	*head->tail = t;
 743	head->tail = &(t->next);
 744	raise_softirq_irqoff(softirq_nr);
 745	local_irq_restore(flags);
 746}
 747
 748void __tasklet_schedule(struct tasklet_struct *t)
 749{
 750	__tasklet_schedule_common(t, &tasklet_vec,
 751				  TASKLET_SOFTIRQ);
 752}
 753EXPORT_SYMBOL(__tasklet_schedule);
 754
 755void __tasklet_hi_schedule(struct tasklet_struct *t)
 756{
 757	__tasklet_schedule_common(t, &tasklet_hi_vec,
 758				  HI_SOFTIRQ);
 759}
 760EXPORT_SYMBOL(__tasklet_hi_schedule);
 761
 762static bool tasklet_clear_sched(struct tasklet_struct *t)
 763{
 764	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
 765		wake_up_var(&t->state);
 766		return true;
 767	}
 768
 769	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
 770		  t->use_callback ? "callback" : "func",
 771		  t->use_callback ? (void *)t->callback : (void *)t->func);
 772
 773	return false;
 774}
 775
 776static void tasklet_action_common(struct softirq_action *a,
 777				  struct tasklet_head *tl_head,
 778				  unsigned int softirq_nr)
 779{
 780	struct tasklet_struct *list;
 781
 782	local_irq_disable();
 783	list = tl_head->head;
 784	tl_head->head = NULL;
 785	tl_head->tail = &tl_head->head;
 786	local_irq_enable();
 787
 788	while (list) {
 789		struct tasklet_struct *t = list;
 790
 791		list = list->next;
 792
 793		if (tasklet_trylock(t)) {
 794			if (!atomic_read(&t->count)) {
 795				if (tasklet_clear_sched(t)) {
 796					if (t->use_callback)
 797						t->callback(t);
 798					else
 799						t->func(t->data);
 800				}
 801				tasklet_unlock(t);
 802				continue;
 803			}
 804			tasklet_unlock(t);
 805		}
 806
 807		local_irq_disable();
 808		t->next = NULL;
 809		*tl_head->tail = t;
 810		tl_head->tail = &t->next;
 811		__raise_softirq_irqoff(softirq_nr);
 812		local_irq_enable();
 813	}
 814}
 815
 816static __latent_entropy void tasklet_action(struct softirq_action *a)
 817{
 818	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
 819}
 820
 821static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 822{
 823	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
 824}
 825
 826void tasklet_setup(struct tasklet_struct *t,
 827		   void (*callback)(struct tasklet_struct *))
 828{
 829	t->next = NULL;
 830	t->state = 0;
 831	atomic_set(&t->count, 0);
 832	t->callback = callback;
 833	t->use_callback = true;
 834	t->data = 0;
 835}
 836EXPORT_SYMBOL(tasklet_setup);
 837
 838void tasklet_init(struct tasklet_struct *t,
 839		  void (*func)(unsigned long), unsigned long data)
 840{
 841	t->next = NULL;
 842	t->state = 0;
 843	atomic_set(&t->count, 0);
 844	t->func = func;
 845	t->use_callback = false;
 846	t->data = data;
 847}
 848EXPORT_SYMBOL(tasklet_init);
 849
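/*
 * Editor's usage sketch (hypothetical driver): the callback-based API set
 * up by tasklet_setup() above. from_tasklet() is the container_of()
 * helper from <linux/interrupt.h>.
 */
struct example_dev {
	struct tasklet_struct bh;
	/* ... device state ... */
};

static void example_dev_bh(struct tasklet_struct *t)
{
	struct example_dev *dev = from_tasklet(dev, t, bh);

	/* the deferred half of the interrupt handler runs here */
	(void)dev;
}

/*
 * probe:	tasklet_setup(&dev->bh, example_dev_bh);
 * hardirq:	tasklet_schedule(&dev->bh);
 * remove:	tasklet_kill(&dev->bh);		// see tasklet_kill() below
 */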
 850#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 851/*
 852 * Do not use in new code. Waiting for tasklets from atomic contexts is
 853 * error prone and should be avoided.
 854 */
 855void tasklet_unlock_spin_wait(struct tasklet_struct *t)
 856{
 857	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
 858		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
 859			/*
 860			 * Prevent a live lock when the current task has
 861			 * preempted soft interrupt processing or is preventing
 862			 * ksoftirqd from running. If the tasklet runs on a
 863			 * different CPU then this has no effect other than
 864			 * doing the BH disable/enable dance for nothing.
 865			 */
 866			local_bh_disable();
 867			local_bh_enable();
 868		} else {
 869			cpu_relax();
 870		}
 871	}
 872}
 873EXPORT_SYMBOL(tasklet_unlock_spin_wait);
 874#endif
 875
 876void tasklet_kill(struct tasklet_struct *t)
 877{
 878	if (in_interrupt())
 879		pr_notice("Attempt to kill tasklet from interrupt\n");
 880
 881	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 882		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
 883
 884	tasklet_unlock_wait(t);
 885	tasklet_clear_sched(t);
 886}
 887EXPORT_SYMBOL(tasklet_kill);
 888
 889#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 890void tasklet_unlock(struct tasklet_struct *t)
 891{
 892	smp_mb__before_atomic();
 893	clear_bit(TASKLET_STATE_RUN, &t->state);
 894	smp_mb__after_atomic();
 895	wake_up_var(&t->state);
 896}
 897EXPORT_SYMBOL_GPL(tasklet_unlock);
 898
 899void tasklet_unlock_wait(struct tasklet_struct *t)
 900{
 901	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
 902}
 903EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
 904#endif
 905
 906void __init softirq_init(void)
 907{
 908	int cpu;
 909
 910	for_each_possible_cpu(cpu) {
 911		per_cpu(tasklet_vec, cpu).tail =
 912			&per_cpu(tasklet_vec, cpu).head;
 913		per_cpu(tasklet_hi_vec, cpu).tail =
 914			&per_cpu(tasklet_hi_vec, cpu).head;
 915	}
 916
 917	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 918	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 919}
 920
 921static int ksoftirqd_should_run(unsigned int cpu)
 922{
 923	return local_softirq_pending();
 924}
 925
 926static void run_ksoftirqd(unsigned int cpu)
 927{
 928	ksoftirqd_run_begin();
 929	if (local_softirq_pending()) {
 930		/*
 931		 * We can safely run softirq on inline stack, as we are not deep
 932		 * in the task stack here.
 933		 */
 934		__do_softirq();
 935		ksoftirqd_run_end();
 936		cond_resched();
 937		return;
 938	}
 939	ksoftirqd_run_end();
 940}
 941
 942#ifdef CONFIG_HOTPLUG_CPU
 943static int takeover_tasklets(unsigned int cpu)
 944{
 945	/* CPU is dead, so no lock needed. */
 946	local_irq_disable();
 947
 948	/* Find end, append list for that CPU. */
 949	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
 950		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
 951		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 952		per_cpu(tasklet_vec, cpu).head = NULL;
 953		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 954	}
 955	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 956
 957	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
 958		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
 959		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 960		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 961		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 962	}
 963	raise_softirq_irqoff(HI_SOFTIRQ);
 964
 965	local_irq_enable();
 966	return 0;
 967}
 968#else
 969#define takeover_tasklets	NULL
 970#endif /* CONFIG_HOTPLUG_CPU */
 971
 972static struct smp_hotplug_thread softirq_threads = {
 973	.store			= &ksoftirqd,
 974	.thread_should_run	= ksoftirqd_should_run,
 975	.thread_fn		= run_ksoftirqd,
 976	.thread_comm		= "ksoftirqd/%u",
 977};
 978
 979static __init int spawn_ksoftirqd(void)
 980{
 981	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
 982				  takeover_tasklets);
 983	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 984
 985	return 0;
 986}
 987early_initcall(spawn_ksoftirqd);
 988
 989/*
 990 * [ These __weak aliases are kept in a separate compilation unit, so that
 991 *   GCC does not inline them incorrectly. ]
 992 */
 993
 994int __init __weak early_irq_init(void)
 995{
 996	return 0;
 997}
 998
 999int __init __weak arch_probe_nr_irqs(void)
1000{
1001	return NR_IRQS_LEGACY;
1002}
1003
1004int __init __weak arch_early_irq_init(void)
1005{
1006	return 0;
1007}
1008
1009unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1010{
1011	return from;
1012}
v4.17 (kernel/softirq.c)
 
  1/*
  2 *	linux/kernel/softirq.c
  3 *
  4 *	Copyright (C) 1992 Linus Torvalds
  5 *
  6 *	Distribute under GPLv2.
  7 *
  8 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/export.h>
 14#include <linux/kernel_stat.h>
 15#include <linux/interrupt.h>
 16#include <linux/init.h>
 17#include <linux/mm.h>
 18#include <linux/notifier.h>
 19#include <linux/percpu.h>
 20#include <linux/cpu.h>
 21#include <linux/freezer.h>
 22#include <linux/kthread.h>
 23#include <linux/rcupdate.h>
 24#include <linux/ftrace.h>
 25#include <linux/smp.h>
 26#include <linux/smpboot.h>
 27#include <linux/tick.h>
 28#include <linux/irq.h>
 29
 30#define CREATE_TRACE_POINTS
 31#include <trace/events/irq.h>
 32
 33/*
 34   - No shared variables, all the data are CPU local.
 35   - If a softirq needs serialization, let it serialize itself
 36     by its own spinlocks.
 37   - Even if softirq is serialized, only local cpu is marked for
 38     execution. Hence, we get a sort of weak cpu binding.
 39     Though it is still not clear whether it will result in better
 40     locality or not.
 41
 42   Examples:
 43   - NET RX softirq. It is multithreaded and does not require
 44     any global serialization.
 45   - NET TX softirq. It kicks software netdevice queues, hence
 46     it is logically serialized per device, but this serialization
 47     is invisible to common code.
 48   - Tasklets: serialized wrt itself.
 49 */
 50
 51#ifndef __ARCH_IRQ_STAT
 52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 53EXPORT_SYMBOL(irq_stat);
 54#endif
 55
 56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 57
 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 59
 60const char * const softirq_to_name[NR_SOFTIRQS] = {
 61	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 62	"TASKLET", "SCHED", "HRTIMER", "RCU"
 63};
 64
 65/*
 66 * We cannot loop indefinitely here to avoid userspace starvation,
 67 * but we also don't want to introduce a worst case 1/HZ latency
 68 * to the pending events, so let the scheduler balance
 69 * the softirq load for us.
 70 */
 71static void wakeup_softirqd(void)
 72{
 73	/* Interrupts are disabled: no need to stop preemption */
 74	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 75
 76	if (tsk && tsk->state != TASK_RUNNING)
 77		wake_up_process(tsk);
 78}
 79
 80/*
 81 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 82 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 83 */
 84static bool ksoftirqd_running(void)
 85{
 86	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 87
 88	return tsk && (tsk->state == TASK_RUNNING);
 89}
 90
 91/*
 92 * preempt_count and SOFTIRQ_OFFSET usage:
 93 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 94 *   softirq processing.
 95 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 96 *   on local_bh_disable or local_bh_enable.
 97 * This lets us distinguish between whether we are currently processing
 98 * softirq and whether we just have bh disabled.
 99 */
100
101/*
102 * This one is for softirq.c-internal use,
103 * where hardirqs are disabled legitimately:
104 */
105#ifdef CONFIG_TRACE_IRQFLAGS
106void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
107{
108	unsigned long flags;
109
110	WARN_ON_ONCE(in_irq());
111
112	raw_local_irq_save(flags);
113	/*
114	 * The preempt tracer hooks into preempt_count_add and will break
115	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
116	 * is set and before current->softirq_enabled is cleared.
117	 * We must manually increment preempt_count here and manually
118	 * call the trace_preempt_off later.
119	 */
120	__preempt_count_add(cnt);
121	/*
122	 * Were softirqs turned off above:
123	 */
124	if (softirq_count() == (cnt & SOFTIRQ_MASK))
125		trace_softirqs_off(ip);
126	raw_local_irq_restore(flags);
127
128	if (preempt_count() == cnt) {
129#ifdef CONFIG_DEBUG_PREEMPT
130		current->preempt_disable_ip = get_lock_parent_ip();
131#endif
132		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
133	}
134}
135EXPORT_SYMBOL(__local_bh_disable_ip);
136#endif /* CONFIG_TRACE_IRQFLAGS */
137
138static void __local_bh_enable(unsigned int cnt)
139{
140	lockdep_assert_irqs_disabled();
141
142	if (softirq_count() == (cnt & SOFTIRQ_MASK))
143		trace_softirqs_on(_RET_IP_);
144	preempt_count_sub(cnt);
145}
146
147/*
148 * Special-case - softirqs can safely be enabled in
149 * cond_resched_softirq(), or by __do_softirq(),
150 * without processing still-pending softirqs:
151 */
152void _local_bh_enable(void)
153{
154	WARN_ON_ONCE(in_irq());
155	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
156}
157EXPORT_SYMBOL(_local_bh_enable);
158
159void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
160{
161	WARN_ON_ONCE(in_irq());
162	lockdep_assert_irqs_enabled();
163#ifdef CONFIG_TRACE_IRQFLAGS
164	local_irq_disable();
165#endif
166	/*
167	 * Are softirqs going to be turned on now:
168	 */
169	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
170		trace_softirqs_on(ip);
171	/*
172	 * Keep preemption disabled until we are done with
173	 * softirq processing:
174	 */
175	preempt_count_sub(cnt - 1);
176
177	if (unlikely(!in_interrupt() && local_softirq_pending())) {
178		/*
179		 * Run softirq if any pending. And do it in its own stack
180		 * as we may be calling this deep in a task call stack already.
181		 */
182		do_softirq();
183	}
184
185	preempt_count_dec();
186#ifdef CONFIG_TRACE_IRQFLAGS
187	local_irq_enable();
188#endif
189	preempt_check_resched();
190}
191EXPORT_SYMBOL(__local_bh_enable_ip);
192
193/*
194 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
195 * but break the loop if need_resched() is set or after 2 ms.
196 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
197 * certain cases, such as stop_machine(), jiffies may cease to
198 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
199 * well to make sure we eventually return from this method.
200 *
201 * These limits have been established via experimentation.
202 * The two things to balance are latency and fairness -
203 * we want to handle softirqs as soon as possible, but they
204 * should not be able to lock up the box.
205 */
206#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
207#define MAX_SOFTIRQ_RESTART 10
208
209#ifdef CONFIG_TRACE_IRQFLAGS
210/*
211 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
212 * to keep the lockdep irq context tracking as tight as possible in order to
213 * not mis-qualify lock contexts and miss possible deadlocks.
214 */
215
216static inline bool lockdep_softirq_start(void)
217{
218	bool in_hardirq = false;
219
220	if (trace_hardirq_context(current)) {
221		in_hardirq = true;
222		trace_hardirq_exit();
223	}
224
225	lockdep_softirq_enter();
226
227	return in_hardirq;
228}
229
230static inline void lockdep_softirq_end(bool in_hardirq)
231{
232	lockdep_softirq_exit();
233
234	if (in_hardirq)
235		trace_hardirq_enter();
236}
237#else
238static inline bool lockdep_softirq_start(void) { return false; }
239static inline void lockdep_softirq_end(bool in_hardirq) { }
240#endif
241
242asmlinkage __visible void __softirq_entry __do_softirq(void)
243{
244	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
245	unsigned long old_flags = current->flags;
246	int max_restart = MAX_SOFTIRQ_RESTART;
247	struct softirq_action *h;
248	bool in_hardirq;
249	__u32 pending;
250	int softirq_bit;
251
252	/*
253	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
254	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
255	 * again if the socket is related to swapping.
256	 */
257	current->flags &= ~PF_MEMALLOC;
258
259	pending = local_softirq_pending();
260	account_irq_enter_time(current);
261
262	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
263	in_hardirq = lockdep_softirq_start();
264
265restart:
266	/* Reset the pending bitmask before enabling irqs */
267	set_softirq_pending(0);
268
269	local_irq_enable();
270
271	h = softirq_vec;
272
273	while ((softirq_bit = ffs(pending))) {
274		unsigned int vec_nr;
275		int prev_count;
276
277		h += softirq_bit - 1;
278
279		vec_nr = h - softirq_vec;
280		prev_count = preempt_count();
281
282		kstat_incr_softirqs_this_cpu(vec_nr);
283
284		trace_softirq_entry(vec_nr);
285		h->action(h);
286		trace_softirq_exit(vec_nr);
287		if (unlikely(prev_count != preempt_count())) {
288			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
289			       vec_nr, softirq_to_name[vec_nr], h->action,
290			       prev_count, preempt_count());
291			preempt_count_set(prev_count);
292		}
293		h++;
294		pending >>= softirq_bit;
295	}
296
297	rcu_bh_qs();
298	local_irq_disable();
299
300	pending = local_softirq_pending();
301	if (pending) {
302		if (time_before(jiffies, end) && !need_resched() &&
303		    --max_restart)
304			goto restart;
305
306		wakeup_softirqd();
307	}
308
309	lockdep_softirq_end(in_hardirq);
310	account_irq_exit_time(current);
311	__local_bh_enable(SOFTIRQ_OFFSET);
312	WARN_ON_ONCE(in_interrupt());
313	current_restore_flags(old_flags, PF_MEMALLOC);
314}
315
316asmlinkage __visible void do_softirq(void)
317{
318	__u32 pending;
319	unsigned long flags;
320
321	if (in_interrupt())
322		return;
323
324	local_irq_save(flags);
325
326	pending = local_softirq_pending();
327
328	if (pending && !ksoftirqd_running())
329		do_softirq_own_stack();
330
331	local_irq_restore(flags);
332}
333
334/*
335 * Enter an interrupt context.
336 */
337void irq_enter(void)
338{
339	rcu_irq_enter();
340	if (is_idle_task(current) && !in_interrupt()) {
341		/*
342		 * Prevent raise_softirq from needlessly waking up ksoftirqd
343		 * here, as softirq will be serviced on return from interrupt.
344		 */
345		local_bh_disable();
346		tick_irq_enter();
347		_local_bh_enable();
348	}
349
350	__irq_enter();
351}
352
353static inline void invoke_softirq(void)
354{
355	if (ksoftirqd_running())
356		return;
357
358	if (!force_irqthreads) {
359#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
360		/*
361		 * We can safely execute softirq on the current stack if
362		 * it is the irq stack, because it should be near empty
363		 * at this stage.
364		 */
365		__do_softirq();
366#else
367		/*
368		 * Otherwise, irq_exit() is called on the task stack that can
369		 * be potentially deep already. So call softirq in its own stack
370		 * to prevent from any overrun.
371		 */
372		do_softirq_own_stack();
373#endif
374	} else {
375		wakeup_softirqd();
376	}
377}
378
379static inline void tick_irq_exit(void)
380{
381#ifdef CONFIG_NO_HZ_COMMON
382	int cpu = smp_processor_id();
383
384	/* Make sure that timer wheel updates are propagated */
385	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
386		if (!in_interrupt())
387			tick_nohz_irq_exit();
388	}
389#endif
390}
391
392/*
393 * Exit an interrupt context. Process softirqs if needed and possible:
394 */
395void irq_exit(void)
396{
397#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
398	local_irq_disable();
399#else
400	lockdep_assert_irqs_disabled();
401#endif
402	account_irq_exit_time(current);
403	preempt_count_sub(HARDIRQ_OFFSET);
404	if (!in_interrupt() && local_softirq_pending())
405		invoke_softirq();
406
407	tick_irq_exit();
408	rcu_irq_exit();
409	trace_hardirq_exit(); /* must be last! */
410}
411
412/*
413 * This function must run with irqs disabled!
414 */
415inline void raise_softirq_irqoff(unsigned int nr)
416{
417	__raise_softirq_irqoff(nr);
418
419	/*
420	 * If we're in an interrupt or softirq, we're done
421	 * (this also catches softirq-disabled code). We will
422	 * actually run the softirq once we return from
423	 * the irq or softirq.
424	 *
425	 * Otherwise we wake up ksoftirqd to make sure we
426	 * schedule the softirq soon.
427	 */
428	if (!in_interrupt())
429		wakeup_softirqd();
430}
431
432void raise_softirq(unsigned int nr)
433{
434	unsigned long flags;
435
436	local_irq_save(flags);
437	raise_softirq_irqoff(nr);
438	local_irq_restore(flags);
439}
440
441void __raise_softirq_irqoff(unsigned int nr)
442{
443	trace_softirq_raise(nr);
444	or_softirq_pending(1UL << nr);
445}
446
447void open_softirq(int nr, void (*action)(struct softirq_action *))
448{
449	softirq_vec[nr].action = action;
450}
451
452/*
453 * Tasklets
454 */
455struct tasklet_head {
456	struct tasklet_struct *head;
457	struct tasklet_struct **tail;
458};
459
460static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
461static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
462
463static void __tasklet_schedule_common(struct tasklet_struct *t,
464				      struct tasklet_head __percpu *headp,
465				      unsigned int softirq_nr)
466{
467	struct tasklet_head *head;
468	unsigned long flags;
469
470	local_irq_save(flags);
471	head = this_cpu_ptr(headp);
472	t->next = NULL;
473	*head->tail = t;
474	head->tail = &(t->next);
475	raise_softirq_irqoff(softirq_nr);
476	local_irq_restore(flags);
477}
478
479void __tasklet_schedule(struct tasklet_struct *t)
480{
481	__tasklet_schedule_common(t, &tasklet_vec,
482				  TASKLET_SOFTIRQ);
483}
484EXPORT_SYMBOL(__tasklet_schedule);
485
486void __tasklet_hi_schedule(struct tasklet_struct *t)
487{
488	__tasklet_schedule_common(t, &tasklet_hi_vec,
489				  HI_SOFTIRQ);
490}
491EXPORT_SYMBOL(__tasklet_hi_schedule);
492
493static void tasklet_action_common(struct softirq_action *a,
494				  struct tasklet_head *tl_head,
495				  unsigned int softirq_nr)
496{
497	struct tasklet_struct *list;
498
499	local_irq_disable();
500	list = tl_head->head;
501	tl_head->head = NULL;
502	tl_head->tail = &tl_head->head;
503	local_irq_enable();
504
505	while (list) {
506		struct tasklet_struct *t = list;
507
508		list = list->next;
509
510		if (tasklet_trylock(t)) {
511			if (!atomic_read(&t->count)) {
512				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
513							&t->state))
514					BUG();
515				t->func(t->data);
516				tasklet_unlock(t);
517				continue;
518			}
519			tasklet_unlock(t);
520		}
521
522		local_irq_disable();
523		t->next = NULL;
524		*tl_head->tail = t;
525		tl_head->tail = &t->next;
526		__raise_softirq_irqoff(softirq_nr);
527		local_irq_enable();
528	}
529}
530
531static __latent_entropy void tasklet_action(struct softirq_action *a)
532{
533	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
534}
535
536static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
537{
538	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
539}
540
541void tasklet_init(struct tasklet_struct *t,
542		  void (*func)(unsigned long), unsigned long data)
543{
544	t->next = NULL;
545	t->state = 0;
546	atomic_set(&t->count, 0);
547	t->func = func;
548	t->data = data;
549}
550EXPORT_SYMBOL(tasklet_init);
551
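/*
 * Editor's usage sketch (hypothetical driver; this is the pre-5.10 API in
 * which the handler receives an opaque unsigned long rather than the
 * tasklet pointer):
 */
struct example_dev {
	struct tasklet_struct bh;
	/* ... device state ... */
};

static void example_dev_bh(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	/* deferred work runs here in softirq context */
	(void)dev;
}

/*
 * init:	tasklet_init(&dev->bh, example_dev_bh, (unsigned long)dev);
 * statically:	DECLARE_TASKLET(example_tasklet, example_dev_bh, 0);
 * trigger:	tasklet_schedule(&dev->bh);
 */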
552void tasklet_kill(struct tasklet_struct *t)
553{
554	if (in_interrupt())
555		pr_notice("Attempt to kill tasklet from interrupt\n");
556
557	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
558		do {
559			yield();
560		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
561	}
562	tasklet_unlock_wait(t);
563	clear_bit(TASKLET_STATE_SCHED, &t->state);
564}
565EXPORT_SYMBOL(tasklet_kill);
566
567/*
568 * tasklet_hrtimer
569 */
570
571/*
572 * The trampoline is called when the hrtimer expires. It schedules a tasklet
573 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
574 * hrtimer callback, but from softirq context.
575 */
576static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
577{
578	struct tasklet_hrtimer *ttimer =
579		container_of(timer, struct tasklet_hrtimer, timer);
580
581	tasklet_hi_schedule(&ttimer->tasklet);
582	return HRTIMER_NORESTART;
583}
584
585/*
586 * Helper function which calls the hrtimer callback from
587 * tasklet/softirq context
588 */
589static void __tasklet_hrtimer_trampoline(unsigned long data)
590{
591	struct tasklet_hrtimer *ttimer = (void *)data;
592	enum hrtimer_restart restart;
593
594	restart = ttimer->function(&ttimer->timer);
595	if (restart != HRTIMER_NORESTART)
596		hrtimer_restart(&ttimer->timer);
597}
598
599/**
600 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
601 * @ttimer:	 tasklet_hrtimer which is initialized
602 * @function:	 hrtimer callback function which gets called from softirq context
603 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
604 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
605 */
606void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
607			  enum hrtimer_restart (*function)(struct hrtimer *),
608			  clockid_t which_clock, enum hrtimer_mode mode)
609{
610	hrtimer_init(&ttimer->timer, which_clock, mode);
611	ttimer->timer.function = __hrtimer_tasklet_trampoline;
612	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
613		     (unsigned long)ttimer);
614	ttimer->function = function;
615}
616EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
617
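/*
 * Editor's usage sketch for the combo above (hypothetical caller; the
 * tasklet_hrtimer API was removed from later kernels):
 */
static enum hrtimer_restart example_hrtimer_fn(struct hrtimer *timer)
{
	/* runs from HI_SOFTIRQ (tasklet) context, not hardirq context */
	return HRTIMER_NORESTART;
}

/*
 * setup:	tasklet_hrtimer_init(&ttimer, example_hrtimer_fn,
 *				     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 * arm:		tasklet_hrtimer_start(&ttimer, ms_to_ktime(10),
 *				      HRTIMER_MODE_REL);
 */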
618void __init softirq_init(void)
619{
620	int cpu;
621
622	for_each_possible_cpu(cpu) {
623		per_cpu(tasklet_vec, cpu).tail =
624			&per_cpu(tasklet_vec, cpu).head;
625		per_cpu(tasklet_hi_vec, cpu).tail =
626			&per_cpu(tasklet_hi_vec, cpu).head;
627	}
628
629	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
630	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
631}
632
633static int ksoftirqd_should_run(unsigned int cpu)
634{
635	return local_softirq_pending();
636}
637
638static void run_ksoftirqd(unsigned int cpu)
639{
640	local_irq_disable();
641	if (local_softirq_pending()) {
642		/*
643		 * We can safely run softirq on inline stack, as we are not deep
644		 * in the task stack here.
645		 */
646		__do_softirq();
647		local_irq_enable();
648		cond_resched();
649		return;
650	}
651	local_irq_enable();
652}
653
654#ifdef CONFIG_HOTPLUG_CPU
655/*
656 * tasklet_kill_immediate is called to remove a tasklet which can already be
657 * scheduled for execution on @cpu.
658 *
659 * Unlike tasklet_kill, this function removes the tasklet
660 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
661 *
662 * When this function is called, @cpu must be in the CPU_DEAD state.
663 */
664void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
665{
666	struct tasklet_struct **i;
667
668	BUG_ON(cpu_online(cpu));
669	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
670
671	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
672		return;
673
674	/* CPU is dead, so no lock needed. */
675	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
676		if (*i == t) {
677			*i = t->next;
678			/* If this was the tail element, move the tail ptr */
679			if (*i == NULL)
680				per_cpu(tasklet_vec, cpu).tail = i;
681			return;
682		}
683	}
684	BUG();
685}
686
687static int takeover_tasklets(unsigned int cpu)
688{
689	/* CPU is dead, so no lock needed. */
690	local_irq_disable();
691
692	/* Find end, append list for that CPU. */
693	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
694		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
695		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
696		per_cpu(tasklet_vec, cpu).head = NULL;
697		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
698	}
699	raise_softirq_irqoff(TASKLET_SOFTIRQ);
700
701	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
702		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
703		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
704		per_cpu(tasklet_hi_vec, cpu).head = NULL;
705		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
706	}
707	raise_softirq_irqoff(HI_SOFTIRQ);
708
709	local_irq_enable();
710	return 0;
711}
712#else
713#define takeover_tasklets	NULL
714#endif /* CONFIG_HOTPLUG_CPU */
715
716static struct smp_hotplug_thread softirq_threads = {
717	.store			= &ksoftirqd,
718	.thread_should_run	= ksoftirqd_should_run,
719	.thread_fn		= run_ksoftirqd,
720	.thread_comm		= "ksoftirqd/%u",
721};
722
723static __init int spawn_ksoftirqd(void)
724{
725	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
726				  takeover_tasklets);
727	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
728
729	return 0;
730}
731early_initcall(spawn_ksoftirqd);
732
733/*
734 * [ These __weak aliases are kept in a separate compilation unit, so that
735 *   GCC does not inline them incorrectly. ]
736 */
737
738int __init __weak early_irq_init(void)
739{
740	return 0;
741}
742
743int __init __weak arch_probe_nr_irqs(void)
744{
745	return NR_IRQS_LEGACY;
746}
747
748int __init __weak arch_early_irq_init(void)
749{
750	return 0;
751}
752
753unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
754{
755	return from;
756}