kernel/softirq.c, v4.10.11
  1/*
  2 *	linux/kernel/softirq.c
  3 *
  4 *	Copyright (C) 1992 Linus Torvalds
  5 *
  6 *	Distribute under GPLv2.
  7 *
  8 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/export.h>
 14#include <linux/kernel_stat.h>
 15#include <linux/interrupt.h>
 16#include <linux/init.h>
 17#include <linux/mm.h>
 18#include <linux/notifier.h>
 19#include <linux/percpu.h>
 20#include <linux/cpu.h>
 21#include <linux/freezer.h>
 22#include <linux/kthread.h>
 23#include <linux/rcupdate.h>
 24#include <linux/ftrace.h>
 25#include <linux/smp.h>
 26#include <linux/smpboot.h>
 27#include <linux/tick.h>
 28#include <linux/irq.h>
 29
 30#define CREATE_TRACE_POINTS
 31#include <trace/events/irq.h>
 32
 33/*
 34   - No shared variables, all the data are CPU local.
 35   - If a softirq needs serialization, let it serialize itself
 36     by its own spinlocks.
 37   - Even if softirq is serialized, only local cpu is marked for
  38     execution. Hence, we get something of a weak cpu binding.
  39     Though it is still not clear whether it will result in better
  40     locality or not.
 41
 42   Examples:
 43   - NET RX softirq. It is multithreaded and does not require
 44     any global serialization.
 45   - NET TX softirq. It kicks software netdevice queues, hence
 46     it is logically serialized per device, but this serialization
 47     is invisible to common code.
 48   - Tasklets: serialized wrt itself.
 49 */
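/*
 * Illustrative sketch (not part of the kernel source; my_dev_interrupt is
 * hypothetical): raising a softirq from hardirq context sets only the local
 * CPU's pending mask, so the action registered in softirq_vec[] runs on that
 * same CPU once irq_exit() is reached; this is the "weak cpu binding" noted
 * above.
 *
 *	static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
 *	{
 *		raise_softirq_irqoff(NET_RX_SOFTIRQ);	// marks this CPU only
 *		return IRQ_HANDLED;
 *	}
 */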
 50
 51#ifndef __ARCH_IRQ_STAT
 52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 53EXPORT_SYMBOL(irq_stat);
 54#endif
 55
 56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 57
 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 59
 60const char * const softirq_to_name[NR_SOFTIRQS] = {
 61	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 62	"TASKLET", "SCHED", "HRTIMER", "RCU"
 63};
 64
 65/*
 66 * we cannot loop indefinitely here to avoid userspace starvation,
 67 * but we also don't want to introduce a worst case 1/HZ latency
  68 * to the pending events, so let the scheduler balance
 69 * the softirq load for us.
 70 */
 71static void wakeup_softirqd(void)
 72{
 73	/* Interrupts are disabled: no need to stop preemption */
 74	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 75
 76	if (tsk && tsk->state != TASK_RUNNING)
 77		wake_up_process(tsk);
 78}
 79
 80/*
 81 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 82 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 83 */
 84static bool ksoftirqd_running(void)
 85{
 86	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 87
 88	return tsk && (tsk->state == TASK_RUNNING);
 89}
 90
 91/*
 92 * preempt_count and SOFTIRQ_OFFSET usage:
 93 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 94 *   softirq processing.
 95 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 96 *   on local_bh_disable or local_bh_enable.
 97 * This lets us distinguish between whether we are currently processing
 98 * softirq and whether we just have bh disabled.
 99 */
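/*
 * Worked example of the offsets above (illustration; the values are those
 * defined in <linux/preempt.h>): SOFTIRQ_OFFSET is 0x100 and
 * SOFTIRQ_DISABLE_OFFSET is 0x200, so softirq_count() reads 0x200 after
 * local_bh_disable() (bh merely disabled) but 0x100 while __do_softirq()
 * runs (actually processing). in_serving_softirq() tests exactly the
 * SOFTIRQ_OFFSET bit:
 *
 *	local_bh_disable();			// preempt_count += 0x200
 *	WARN_ON(in_serving_softirq());		// disabled, but not serving
 *	local_bh_enable();			// preempt_count -= 0x200
 */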
100
101/*
102 * This one is for softirq.c-internal use,
103 * where hardirqs are disabled legitimately:
104 */
105#ifdef CONFIG_TRACE_IRQFLAGS
106void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
107{
108	unsigned long flags;
109
110	WARN_ON_ONCE(in_irq());
111
112	raw_local_irq_save(flags);
113	/*
114	 * The preempt tracer hooks into preempt_count_add and will break
115	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
116	 * is set and before current->softirq_enabled is cleared.
117	 * We must manually increment preempt_count here and manually
118	 * call the trace_preempt_off later.
119	 */
120	__preempt_count_add(cnt);
121	/*
122	 * Were softirqs turned off above:
123	 */
124	if (softirq_count() == (cnt & SOFTIRQ_MASK))
125		trace_softirqs_off(ip);
126	raw_local_irq_restore(flags);
127
128	if (preempt_count() == cnt) {
129#ifdef CONFIG_DEBUG_PREEMPT
130		current->preempt_disable_ip = get_lock_parent_ip();
131#endif
132		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
133	}
134}
135EXPORT_SYMBOL(__local_bh_disable_ip);
136#endif /* CONFIG_TRACE_IRQFLAGS */
137
138static void __local_bh_enable(unsigned int cnt)
139{
140	WARN_ON_ONCE(!irqs_disabled());
141
142	if (softirq_count() == (cnt & SOFTIRQ_MASK))
143		trace_softirqs_on(_RET_IP_);
144	preempt_count_sub(cnt);
145}
146
147/*
148 * Special-case - softirqs can safely be enabled in
149 * cond_resched_softirq(), or by __do_softirq(),
150 * without processing still-pending softirqs:
151 */
152void _local_bh_enable(void)
153{
154	WARN_ON_ONCE(in_irq());
155	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
156}
157EXPORT_SYMBOL(_local_bh_enable);
158
159void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
160{
161	WARN_ON_ONCE(in_irq() || irqs_disabled());
162#ifdef CONFIG_TRACE_IRQFLAGS
163	local_irq_disable();
164#endif
165	/*
166	 * Are softirqs going to be turned on now:
167	 */
168	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
169		trace_softirqs_on(ip);
170	/*
171	 * Keep preemption disabled until we are done with
172	 * softirq processing:
173	 */
174	preempt_count_sub(cnt - 1);
175
176	if (unlikely(!in_interrupt() && local_softirq_pending())) {
177		/*
178		 * Run softirq if any pending. And do it in its own stack
179		 * as we may be calling this deep in a task call stack already.
180		 */
181		do_softirq();
182	}
183
184	preempt_count_dec();
185#ifdef CONFIG_TRACE_IRQFLAGS
186	local_irq_enable();
187#endif
188	preempt_check_resched();
189}
190EXPORT_SYMBOL(__local_bh_enable_ip);
191
192/*
193 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
194 * but break the loop if need_resched() is set or after 2 ms.
195 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
196 * certain cases, such as stop_machine(), jiffies may cease to
197 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
198 * well to make sure we eventually return from this method.
199 *
200 * These limits have been established via experimentation.
 201 * The two things to balance are latency and fairness -
202 * we want to handle softirqs as soon as possible, but they
203 * should not be able to lock up the box.
204 */
205#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
206#define MAX_SOFTIRQ_RESTART 10
207
208#ifdef CONFIG_TRACE_IRQFLAGS
209/*
210 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
211 * to keep the lockdep irq context tracking as tight as possible in order to
 213 * not mis-qualify lock contexts and miss possible deadlocks.
213 */
214
215static inline bool lockdep_softirq_start(void)
216{
217	bool in_hardirq = false;
218
219	if (trace_hardirq_context(current)) {
220		in_hardirq = true;
221		trace_hardirq_exit();
222	}
223
224	lockdep_softirq_enter();
225
226	return in_hardirq;
227}
228
229static inline void lockdep_softirq_end(bool in_hardirq)
230{
231	lockdep_softirq_exit();
232
233	if (in_hardirq)
234		trace_hardirq_enter();
235}
236#else
237static inline bool lockdep_softirq_start(void) { return false; }
238static inline void lockdep_softirq_end(bool in_hardirq) { }
239#endif
240
241asmlinkage __visible void __softirq_entry __do_softirq(void)
242{
243	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
244	unsigned long old_flags = current->flags;
245	int max_restart = MAX_SOFTIRQ_RESTART;
246	struct softirq_action *h;
247	bool in_hardirq;
248	__u32 pending;
249	int softirq_bit;
250
251	/*
 252	 * Mask out PF_MEMALLOC as current task context is borrowed for the
 253	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
 254	 * again if the socket is related to swap.
255	 */
256	current->flags &= ~PF_MEMALLOC;
257
258	pending = local_softirq_pending();
259	account_irq_enter_time(current);
260
261	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
262	in_hardirq = lockdep_softirq_start();
263
264restart:
265	/* Reset the pending bitmask before enabling irqs */
266	set_softirq_pending(0);
267
268	local_irq_enable();
269
270	h = softirq_vec;
271
272	while ((softirq_bit = ffs(pending))) {
273		unsigned int vec_nr;
274		int prev_count;
275
276		h += softirq_bit - 1;
277
278		vec_nr = h - softirq_vec;
279		prev_count = preempt_count();
280
281		kstat_incr_softirqs_this_cpu(vec_nr);
282
283		trace_softirq_entry(vec_nr);
284		h->action(h);
285		trace_softirq_exit(vec_nr);
286		if (unlikely(prev_count != preempt_count())) {
287			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
288			       vec_nr, softirq_to_name[vec_nr], h->action,
289			       prev_count, preempt_count());
290			preempt_count_set(prev_count);
291		}
292		h++;
293		pending >>= softirq_bit;
294	}
295
296	rcu_bh_qs();
297	local_irq_disable();
298
299	pending = local_softirq_pending();
300	if (pending) {
301		if (time_before(jiffies, end) && !need_resched() &&
302		    --max_restart)
303			goto restart;
304
305		wakeup_softirqd();
306	}
307
308	lockdep_softirq_end(in_hardirq);
309	account_irq_exit_time(current);
310	__local_bh_enable(SOFTIRQ_OFFSET);
311	WARN_ON_ONCE(in_interrupt());
312	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
313}
314
315asmlinkage __visible void do_softirq(void)
316{
317	__u32 pending;
318	unsigned long flags;
319
320	if (in_interrupt())
321		return;
322
323	local_irq_save(flags);
324
325	pending = local_softirq_pending();
326
327	if (pending && !ksoftirqd_running())
328		do_softirq_own_stack();
329
330	local_irq_restore(flags);
331}
332
333/*
334 * Enter an interrupt context.
335 */
336void irq_enter(void)
337{
338	rcu_irq_enter();
339	if (is_idle_task(current) && !in_interrupt()) {
340		/*
341		 * Prevent raise_softirq from needlessly waking up ksoftirqd
342		 * here, as softirq will be serviced on return from interrupt.
343		 */
344		local_bh_disable();
345		tick_irq_enter();
346		_local_bh_enable();
347	}
348
349	__irq_enter();
350}
351
352static inline void invoke_softirq(void)
353{
354	if (ksoftirqd_running())
355		return;
356
357	if (!force_irqthreads) {
358#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
359		/*
360		 * We can safely execute softirq on the current stack if
361		 * it is the irq stack, because it should be near empty
362		 * at this stage.
363		 */
364		__do_softirq();
365#else
366		/*
 367		 * Otherwise, irq_exit() is called on the task stack, which can
 368		 * already be quite deep. So run softirqs on their own stack
 369		 * to prevent any overrun.
370		 */
371		do_softirq_own_stack();
372#endif
373	} else {
374		wakeup_softirqd();
375	}
376}
377
378static inline void tick_irq_exit(void)
379{
380#ifdef CONFIG_NO_HZ_COMMON
381	int cpu = smp_processor_id();
382
383	/* Make sure that timer wheel updates are propagated */
384	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
385		if (!in_interrupt())
386			tick_nohz_irq_exit();
387	}
388#endif
389}
390
391/*
392 * Exit an interrupt context. Process softirqs if needed and possible:
393 */
394void irq_exit(void)
395{
396#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
397	local_irq_disable();
398#else
399	WARN_ON_ONCE(!irqs_disabled());
400#endif
401
402	account_irq_exit_time(current);
403	preempt_count_sub(HARDIRQ_OFFSET);
404	if (!in_interrupt() && local_softirq_pending())
405		invoke_softirq();
406
407	tick_irq_exit();
408	rcu_irq_exit();
409	trace_hardirq_exit(); /* must be last! */
410}
411
412/*
413 * This function must run with irqs disabled!
414 */
415inline void raise_softirq_irqoff(unsigned int nr)
416{
417	__raise_softirq_irqoff(nr);
418
419	/*
420	 * If we're in an interrupt or softirq, we're done
421	 * (this also catches softirq-disabled code). We will
422	 * actually run the softirq once we return from
423	 * the irq or softirq.
424	 *
425	 * Otherwise we wake up ksoftirqd to make sure we
426	 * schedule the softirq soon.
427	 */
428	if (!in_interrupt())
429		wakeup_softirqd();
430}
431
432void raise_softirq(unsigned int nr)
433{
434	unsigned long flags;
435
436	local_irq_save(flags);
437	raise_softirq_irqoff(nr);
438	local_irq_restore(flags);
439}
440
441void __raise_softirq_irqoff(unsigned int nr)
442{
443	trace_softirq_raise(nr);
444	or_softirq_pending(1UL << nr);
445}
446
447void open_softirq(int nr, void (*action)(struct softirq_action *))
448{
449	softirq_vec[nr].action = action;
450}
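/*
 * Typical registration pattern (illustration only; compare softirq_init()
 * below and the networking init code): a handler is installed once for its
 * softirq number, and the softirq is then raised whenever there is work:
 *
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);	// boot-time setup
 *	...
 *	raise_softirq(NET_RX_SOFTIRQ);			// when work arrives
 */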
451
452/*
453 * Tasklets
454 */
455struct tasklet_head {
456	struct tasklet_struct *head;
457	struct tasklet_struct **tail;
458};
459
460static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
461static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
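/*
 * Note on the list layout (descriptive comment, not in the kernel source):
 * each per-cpu list is singly linked through tasklet_struct::next, and
 * ->tail points at the location of the last ->next pointer. An empty list
 * has head == NULL and tail == &head (set up in softirq_init() below), so
 * the schedule paths can append in O(1) with no empty-list special case.
 */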
462
463void __tasklet_schedule(struct tasklet_struct *t)
464{
465	unsigned long flags;
466
467	local_irq_save(flags);
468	t->next = NULL;
469	*__this_cpu_read(tasklet_vec.tail) = t;
470	__this_cpu_write(tasklet_vec.tail, &(t->next));
471	raise_softirq_irqoff(TASKLET_SOFTIRQ);
472	local_irq_restore(flags);
473}
474EXPORT_SYMBOL(__tasklet_schedule);
475
476void __tasklet_hi_schedule(struct tasklet_struct *t)
477{
478	unsigned long flags;
479
480	local_irq_save(flags);
481	t->next = NULL;
482	*__this_cpu_read(tasklet_hi_vec.tail) = t;
483	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
484	raise_softirq_irqoff(HI_SOFTIRQ);
485	local_irq_restore(flags);
486}
487EXPORT_SYMBOL(__tasklet_hi_schedule);
488
489void __tasklet_hi_schedule_first(struct tasklet_struct *t)
490{
491	BUG_ON(!irqs_disabled());
492
493	t->next = __this_cpu_read(tasklet_hi_vec.head);
494	__this_cpu_write(tasklet_hi_vec.head, t);
495	__raise_softirq_irqoff(HI_SOFTIRQ);
496}
497EXPORT_SYMBOL(__tasklet_hi_schedule_first);
498
499static __latent_entropy void tasklet_action(struct softirq_action *a)
500{
501	struct tasklet_struct *list;
502
503	local_irq_disable();
504	list = __this_cpu_read(tasklet_vec.head);
505	__this_cpu_write(tasklet_vec.head, NULL);
506	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
507	local_irq_enable();
508
509	while (list) {
510		struct tasklet_struct *t = list;
511
512		list = list->next;
513
514		if (tasklet_trylock(t)) {
515			if (!atomic_read(&t->count)) {
516				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
517							&t->state))
518					BUG();
519				t->func(t->data);
520				tasklet_unlock(t);
521				continue;
522			}
523			tasklet_unlock(t);
524		}
525
526		local_irq_disable();
527		t->next = NULL;
528		*__this_cpu_read(tasklet_vec.tail) = t;
529		__this_cpu_write(tasklet_vec.tail, &(t->next));
530		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
531		local_irq_enable();
532	}
533}
534
535static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
536{
537	struct tasklet_struct *list;
538
539	local_irq_disable();
540	list = __this_cpu_read(tasklet_hi_vec.head);
541	__this_cpu_write(tasklet_hi_vec.head, NULL);
542	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
543	local_irq_enable();
544
545	while (list) {
546		struct tasklet_struct *t = list;
547
548		list = list->next;
549
550		if (tasklet_trylock(t)) {
551			if (!atomic_read(&t->count)) {
552				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
553							&t->state))
554					BUG();
555				t->func(t->data);
556				tasklet_unlock(t);
557				continue;
558			}
559			tasklet_unlock(t);
560		}
561
562		local_irq_disable();
563		t->next = NULL;
564		*__this_cpu_read(tasklet_hi_vec.tail) = t;
565		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
566		__raise_softirq_irqoff(HI_SOFTIRQ);
567		local_irq_enable();
568	}
569}
570
571void tasklet_init(struct tasklet_struct *t,
572		  void (*func)(unsigned long), unsigned long data)
573{
574	t->next = NULL;
575	t->state = 0;
576	atomic_set(&t->count, 0);
577	t->func = func;
578	t->data = data;
579}
580EXPORT_SYMBOL(tasklet_init);
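/*
 * Minimal usage sketch (illustration only; my_tasklet_fn and my_dev are
 * hypothetical). tasklet_schedule(), declared in <linux/interrupt.h>, wraps
 * __tasklet_schedule() above:
 *
 *	static void my_tasklet_fn(unsigned long data) { ... }
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)my_dev);
 *	tasklet_schedule(&my_tasklet);	// typically from the irq handler
 *	...
 *	tasklet_kill(&my_tasklet);	// on teardown, from process context
 */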
581
582void tasklet_kill(struct tasklet_struct *t)
583{
584	if (in_interrupt())
585		pr_notice("Attempt to kill tasklet from interrupt\n");
586
587	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
588		do {
589			yield();
590		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
591	}
592	tasklet_unlock_wait(t);
593	clear_bit(TASKLET_STATE_SCHED, &t->state);
594}
595EXPORT_SYMBOL(tasklet_kill);
596
597/*
598 * tasklet_hrtimer
599 */
600
601/*
602 * The trampoline is called when the hrtimer expires. It schedules a tasklet
603 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
604 * hrtimer callback, but from softirq context.
605 */
606static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
607{
608	struct tasklet_hrtimer *ttimer =
609		container_of(timer, struct tasklet_hrtimer, timer);
610
611	tasklet_hi_schedule(&ttimer->tasklet);
612	return HRTIMER_NORESTART;
613}
614
615/*
616 * Helper function which calls the hrtimer callback from
617 * tasklet/softirq context
618 */
619static void __tasklet_hrtimer_trampoline(unsigned long data)
620{
621	struct tasklet_hrtimer *ttimer = (void *)data;
622	enum hrtimer_restart restart;
623
624	restart = ttimer->function(&ttimer->timer);
625	if (restart != HRTIMER_NORESTART)
626		hrtimer_restart(&ttimer->timer);
627}
628
629/**
630 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
631 * @ttimer:	 tasklet_hrtimer which is initialized
632 * @function:	 hrtimer callback function which gets called from softirq context
633 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
634 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
635 */
636void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
637			  enum hrtimer_restart (*function)(struct hrtimer *),
638			  clockid_t which_clock, enum hrtimer_mode mode)
639{
640	hrtimer_init(&ttimer->timer, which_clock, mode);
641	ttimer->timer.function = __hrtimer_tasklet_trampoline;
642	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
643		     (unsigned long)ttimer);
644	ttimer->function = function;
645}
646EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
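/*
 * Usage sketch (illustration only; my_cb and the 10 ms period are made up).
 * The callback runs from HI_SOFTIRQ context rather than hard irq context;
 * tasklet_hrtimer_start() is the companion helper in <linux/interrupt.h>:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t) { ... }
 *	static struct tasklet_hrtimer my_th;
 *
 *	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */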
647
648void __init softirq_init(void)
649{
650	int cpu;
651
652	for_each_possible_cpu(cpu) {
653		per_cpu(tasklet_vec, cpu).tail =
654			&per_cpu(tasklet_vec, cpu).head;
655		per_cpu(tasklet_hi_vec, cpu).tail =
656			&per_cpu(tasklet_hi_vec, cpu).head;
657	}
658
659	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
660	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
661}
662
663static int ksoftirqd_should_run(unsigned int cpu)
664{
665	return local_softirq_pending();
666}
667
668static void run_ksoftirqd(unsigned int cpu)
669{
670	local_irq_disable();
671	if (local_softirq_pending()) {
672		/*
 673		 * We can safely run softirqs on the current stack, as we are
 674		 * not deep in the task stack here.
675		 */
676		__do_softirq();
677		local_irq_enable();
678		cond_resched_rcu_qs();
679		return;
680	}
681	local_irq_enable();
682}
683
684#ifdef CONFIG_HOTPLUG_CPU
685/*
686 * tasklet_kill_immediate is called to remove a tasklet which can already be
687 * scheduled for execution on @cpu.
688 *
689 * Unlike tasklet_kill, this function removes the tasklet
690 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
691 *
692 * When this function is called, @cpu must be in the CPU_DEAD state.
693 */
694void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
695{
696	struct tasklet_struct **i;
697
698	BUG_ON(cpu_online(cpu));
699	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
700
701	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
702		return;
703
704	/* CPU is dead, so no lock needed. */
705	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
706		if (*i == t) {
707			*i = t->next;
708			/* If this was the tail element, move the tail ptr */
709			if (*i == NULL)
710				per_cpu(tasklet_vec, cpu).tail = i;
711			return;
712		}
713	}
714	BUG();
715}
716
717static int takeover_tasklets(unsigned int cpu)
718{
719	/* CPU is dead, so no lock needed. */
720	local_irq_disable();
721
722	/* Find end, append list for that CPU. */
723	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
724		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
725		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
726		per_cpu(tasklet_vec, cpu).head = NULL;
727		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
728	}
729	raise_softirq_irqoff(TASKLET_SOFTIRQ);
730
731	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
732		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
733		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
734		per_cpu(tasklet_hi_vec, cpu).head = NULL;
735		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
736	}
737	raise_softirq_irqoff(HI_SOFTIRQ);
738
739	local_irq_enable();
740	return 0;
741}
742#else
743#define takeover_tasklets	NULL
744#endif /* CONFIG_HOTPLUG_CPU */
745
746static struct smp_hotplug_thread softirq_threads = {
747	.store			= &ksoftirqd,
748	.thread_should_run	= ksoftirqd_should_run,
749	.thread_fn		= run_ksoftirqd,
750	.thread_comm		= "ksoftirqd/%u",
751};
752
753static __init int spawn_ksoftirqd(void)
754{
755	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
756				  takeover_tasklets);
757	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
758
759	return 0;
760}
761early_initcall(spawn_ksoftirqd);
762
763/*
764 * [ These __weak aliases are kept in a separate compilation unit, so that
765 *   GCC does not inline them incorrectly. ]
766 */
767
768int __init __weak early_irq_init(void)
769{
770	return 0;
771}
772
773int __init __weak arch_probe_nr_irqs(void)
774{
775	return NR_IRQS_LEGACY;
776}
777
778int __init __weak arch_early_irq_init(void)
779{
780	return 0;
781}
782
783unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
784{
785	return from;
786}
kernel/softirq.c, v4.17
  1/*
  2 *	linux/kernel/softirq.c
  3 *
  4 *	Copyright (C) 1992 Linus Torvalds
  5 *
  6 *	Distribute under GPLv2.
  7 *
  8 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/export.h>
 14#include <linux/kernel_stat.h>
 15#include <linux/interrupt.h>
 16#include <linux/init.h>
 17#include <linux/mm.h>
 18#include <linux/notifier.h>
 19#include <linux/percpu.h>
 20#include <linux/cpu.h>
 21#include <linux/freezer.h>
 22#include <linux/kthread.h>
 23#include <linux/rcupdate.h>
 24#include <linux/ftrace.h>
 25#include <linux/smp.h>
 26#include <linux/smpboot.h>
 27#include <linux/tick.h>
 28#include <linux/irq.h>
 29
 30#define CREATE_TRACE_POINTS
 31#include <trace/events/irq.h>
 32
 33/*
 34   - No shared variables, all the data are CPU local.
 35   - If a softirq needs serialization, let it serialize itself
 36     by its own spinlocks.
 37   - Even if softirq is serialized, only local cpu is marked for
  38     execution. Hence, we get something of a weak cpu binding.
  39     Though it is still not clear whether it will result in better
  40     locality or not.
 41
 42   Examples:
 43   - NET RX softirq. It is multithreaded and does not require
 44     any global serialization.
 45   - NET TX softirq. It kicks software netdevice queues, hence
 46     it is logically serialized per device, but this serialization
 47     is invisible to common code.
 48   - Tasklets: serialized wrt itself.
 49 */
 50
 51#ifndef __ARCH_IRQ_STAT
 52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 53EXPORT_SYMBOL(irq_stat);
 54#endif
 55
 56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 57
 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 59
 60const char * const softirq_to_name[NR_SOFTIRQS] = {
 61	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 62	"TASKLET", "SCHED", "HRTIMER", "RCU"
 63};
 64
 65/*
 66 * we cannot loop indefinitely here to avoid userspace starvation,
 67 * but we also don't want to introduce a worst case 1/HZ latency
  68 * to the pending events, so let the scheduler balance
 69 * the softirq load for us.
 70 */
 71static void wakeup_softirqd(void)
 72{
 73	/* Interrupts are disabled: no need to stop preemption */
 74	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 75
 76	if (tsk && tsk->state != TASK_RUNNING)
 77		wake_up_process(tsk);
 78}
 79
 80/*
 81 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 82 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 83 */
 84static bool ksoftirqd_running(void)
 85{
 86	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 87
 88	return tsk && (tsk->state == TASK_RUNNING);
 89}
 90
 91/*
 92 * preempt_count and SOFTIRQ_OFFSET usage:
 93 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 94 *   softirq processing.
 95 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 96 *   on local_bh_disable or local_bh_enable.
 97 * This lets us distinguish between whether we are currently processing
 98 * softirq and whether we just have bh disabled.
 99 */
100
101/*
102 * This one is for softirq.c-internal use,
103 * where hardirqs are disabled legitimately:
104 */
105#ifdef CONFIG_TRACE_IRQFLAGS
106void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
107{
108	unsigned long flags;
109
110	WARN_ON_ONCE(in_irq());
111
112	raw_local_irq_save(flags);
113	/*
114	 * The preempt tracer hooks into preempt_count_add and will break
115	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
116	 * is set and before current->softirq_enabled is cleared.
117	 * We must manually increment preempt_count here and manually
118	 * call the trace_preempt_off later.
119	 */
120	__preempt_count_add(cnt);
121	/*
122	 * Were softirqs turned off above:
123	 */
124	if (softirq_count() == (cnt & SOFTIRQ_MASK))
125		trace_softirqs_off(ip);
126	raw_local_irq_restore(flags);
127
128	if (preempt_count() == cnt) {
129#ifdef CONFIG_DEBUG_PREEMPT
130		current->preempt_disable_ip = get_lock_parent_ip();
131#endif
132		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
133	}
134}
135EXPORT_SYMBOL(__local_bh_disable_ip);
136#endif /* CONFIG_TRACE_IRQFLAGS */
137
138static void __local_bh_enable(unsigned int cnt)
139{
140	lockdep_assert_irqs_disabled();
141
142	if (softirq_count() == (cnt & SOFTIRQ_MASK))
143		trace_softirqs_on(_RET_IP_);
144	preempt_count_sub(cnt);
145}
146
147/*
148 * Special-case - softirqs can safely be enabled in
149 * cond_resched_softirq(), or by __do_softirq(),
150 * without processing still-pending softirqs:
151 */
152void _local_bh_enable(void)
153{
154	WARN_ON_ONCE(in_irq());
155	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
156}
157EXPORT_SYMBOL(_local_bh_enable);
158
159void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
160{
161	WARN_ON_ONCE(in_irq());
162	lockdep_assert_irqs_enabled();
163#ifdef CONFIG_TRACE_IRQFLAGS
164	local_irq_disable();
165#endif
166	/*
167	 * Are softirqs going to be turned on now:
168	 */
169	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
170		trace_softirqs_on(ip);
171	/*
172	 * Keep preemption disabled until we are done with
173	 * softirq processing:
174	 */
175	preempt_count_sub(cnt - 1);
176
177	if (unlikely(!in_interrupt() && local_softirq_pending())) {
178		/*
179		 * Run softirq if any pending. And do it in its own stack
180		 * as we may be calling this deep in a task call stack already.
181		 */
182		do_softirq();
183	}
184
185	preempt_count_dec();
186#ifdef CONFIG_TRACE_IRQFLAGS
187	local_irq_enable();
188#endif
189	preempt_check_resched();
190}
191EXPORT_SYMBOL(__local_bh_enable_ip);
192
193/*
194 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
195 * but break the loop if need_resched() is set or after 2 ms.
196 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
197 * certain cases, such as stop_machine(), jiffies may cease to
198 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
199 * well to make sure we eventually return from this method.
200 *
201 * These limits have been established via experimentation.
 202 * The two things to balance are latency and fairness -
203 * we want to handle softirqs as soon as possible, but they
204 * should not be able to lock up the box.
205 */
206#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
207#define MAX_SOFTIRQ_RESTART 10
208
209#ifdef CONFIG_TRACE_IRQFLAGS
210/*
211 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
212 * to keep the lockdep irq context tracking as tight as possible in order to
 213 * not mis-qualify lock contexts and miss possible deadlocks.
214 */
215
216static inline bool lockdep_softirq_start(void)
217{
218	bool in_hardirq = false;
219
220	if (trace_hardirq_context(current)) {
221		in_hardirq = true;
222		trace_hardirq_exit();
223	}
224
225	lockdep_softirq_enter();
226
227	return in_hardirq;
228}
229
230static inline void lockdep_softirq_end(bool in_hardirq)
231{
232	lockdep_softirq_exit();
233
234	if (in_hardirq)
235		trace_hardirq_enter();
236}
237#else
238static inline bool lockdep_softirq_start(void) { return false; }
239static inline void lockdep_softirq_end(bool in_hardirq) { }
240#endif
241
242asmlinkage __visible void __softirq_entry __do_softirq(void)
243{
244	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
245	unsigned long old_flags = current->flags;
246	int max_restart = MAX_SOFTIRQ_RESTART;
247	struct softirq_action *h;
248	bool in_hardirq;
249	__u32 pending;
250	int softirq_bit;
251
252	/*
 253	 * Mask out PF_MEMALLOC as current task context is borrowed for the
 254	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
 255	 * again if the socket is related to swap.
256	 */
257	current->flags &= ~PF_MEMALLOC;
258
259	pending = local_softirq_pending();
260	account_irq_enter_time(current);
261
262	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
263	in_hardirq = lockdep_softirq_start();
264
265restart:
266	/* Reset the pending bitmask before enabling irqs */
267	set_softirq_pending(0);
268
269	local_irq_enable();
270
271	h = softirq_vec;
272
273	while ((softirq_bit = ffs(pending))) {
274		unsigned int vec_nr;
275		int prev_count;
276
277		h += softirq_bit - 1;
278
279		vec_nr = h - softirq_vec;
280		prev_count = preempt_count();
281
282		kstat_incr_softirqs_this_cpu(vec_nr);
283
284		trace_softirq_entry(vec_nr);
285		h->action(h);
286		trace_softirq_exit(vec_nr);
287		if (unlikely(prev_count != preempt_count())) {
288			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
289			       vec_nr, softirq_to_name[vec_nr], h->action,
290			       prev_count, preempt_count());
291			preempt_count_set(prev_count);
292		}
293		h++;
294		pending >>= softirq_bit;
295	}
296
297	rcu_bh_qs();
298	local_irq_disable();
299
300	pending = local_softirq_pending();
301	if (pending) {
302		if (time_before(jiffies, end) && !need_resched() &&
303		    --max_restart)
304			goto restart;
305
306		wakeup_softirqd();
307	}
308
309	lockdep_softirq_end(in_hardirq);
310	account_irq_exit_time(current);
311	__local_bh_enable(SOFTIRQ_OFFSET);
312	WARN_ON_ONCE(in_interrupt());
313	current_restore_flags(old_flags, PF_MEMALLOC);
314}
315
316asmlinkage __visible void do_softirq(void)
317{
318	__u32 pending;
319	unsigned long flags;
320
321	if (in_interrupt())
322		return;
323
324	local_irq_save(flags);
325
326	pending = local_softirq_pending();
327
328	if (pending && !ksoftirqd_running())
329		do_softirq_own_stack();
330
331	local_irq_restore(flags);
332}
333
334/*
335 * Enter an interrupt context.
336 */
337void irq_enter(void)
338{
339	rcu_irq_enter();
340	if (is_idle_task(current) && !in_interrupt()) {
341		/*
342		 * Prevent raise_softirq from needlessly waking up ksoftirqd
343		 * here, as softirq will be serviced on return from interrupt.
344		 */
345		local_bh_disable();
346		tick_irq_enter();
347		_local_bh_enable();
348	}
349
350	__irq_enter();
351}
352
353static inline void invoke_softirq(void)
354{
355	if (ksoftirqd_running())
356		return;
357
358	if (!force_irqthreads) {
359#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
360		/*
361		 * We can safely execute softirq on the current stack if
362		 * it is the irq stack, because it should be near empty
363		 * at this stage.
364		 */
365		__do_softirq();
366#else
367		/*
 368		 * Otherwise, irq_exit() is called on the task stack, which can
 369		 * already be quite deep. So run softirqs on their own stack
 370		 * to prevent any overrun.
371		 */
372		do_softirq_own_stack();
373#endif
374	} else {
375		wakeup_softirqd();
376	}
377}
378
379static inline void tick_irq_exit(void)
380{
381#ifdef CONFIG_NO_HZ_COMMON
382	int cpu = smp_processor_id();
383
384	/* Make sure that timer wheel updates are propagated */
385	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
386		if (!in_interrupt())
387			tick_nohz_irq_exit();
388	}
389#endif
390}
391
392/*
393 * Exit an interrupt context. Process softirqs if needed and possible:
394 */
395void irq_exit(void)
396{
397#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
398	local_irq_disable();
399#else
400	lockdep_assert_irqs_disabled();
401#endif
402	account_irq_exit_time(current);
403	preempt_count_sub(HARDIRQ_OFFSET);
404	if (!in_interrupt() && local_softirq_pending())
405		invoke_softirq();
406
407	tick_irq_exit();
408	rcu_irq_exit();
409	trace_hardirq_exit(); /* must be last! */
410}
411
412/*
413 * This function must run with irqs disabled!
414 */
415inline void raise_softirq_irqoff(unsigned int nr)
416{
417	__raise_softirq_irqoff(nr);
418
419	/*
420	 * If we're in an interrupt or softirq, we're done
421	 * (this also catches softirq-disabled code). We will
422	 * actually run the softirq once we return from
423	 * the irq or softirq.
424	 *
425	 * Otherwise we wake up ksoftirqd to make sure we
426	 * schedule the softirq soon.
427	 */
428	if (!in_interrupt())
429		wakeup_softirqd();
430}
431
432void raise_softirq(unsigned int nr)
433{
434	unsigned long flags;
435
436	local_irq_save(flags);
437	raise_softirq_irqoff(nr);
438	local_irq_restore(flags);
439}
440
441void __raise_softirq_irqoff(unsigned int nr)
442{
443	trace_softirq_raise(nr);
444	or_softirq_pending(1UL << nr);
445}
446
447void open_softirq(int nr, void (*action)(struct softirq_action *))
448{
449	softirq_vec[nr].action = action;
450}
451
452/*
453 * Tasklets
454 */
455struct tasklet_head {
456	struct tasklet_struct *head;
457	struct tasklet_struct **tail;
458};
459
460static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
461static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
462
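/*
 * Descriptive note (not in the kernel source): __tasklet_schedule_common()
 * below is the shared enqueue path for both __tasklet_schedule() and
 * __tasklet_hi_schedule(). It takes the per-cpu list head as a __percpu
 * pointer, resolves it with this_cpu_ptr() under local_irq_save(), appends
 * the tasklet at ->tail, and raises the corresponding softirq.
 */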
463static void __tasklet_schedule_common(struct tasklet_struct *t,
464				      struct tasklet_head __percpu *headp,
465				      unsigned int softirq_nr)
466{
467	struct tasklet_head *head;
468	unsigned long flags;
469
470	local_irq_save(flags);
471	head = this_cpu_ptr(headp);
472	t->next = NULL;
473	*head->tail = t;
474	head->tail = &(t->next);
475	raise_softirq_irqoff(softirq_nr);
476	local_irq_restore(flags);
477}
478
479void __tasklet_schedule(struct tasklet_struct *t)
480{
481	__tasklet_schedule_common(t, &tasklet_vec,
482				  TASKLET_SOFTIRQ);
483}
484EXPORT_SYMBOL(__tasklet_schedule);
485
486void __tasklet_hi_schedule(struct tasklet_struct *t)
487{
488	__tasklet_schedule_common(t, &tasklet_hi_vec,
489				  HI_SOFTIRQ);
490}
491EXPORT_SYMBOL(__tasklet_hi_schedule);
492
493static void tasklet_action_common(struct softirq_action *a,
494				  struct tasklet_head *tl_head,
495				  unsigned int softirq_nr)
496{
497	struct tasklet_struct *list;
498
499	local_irq_disable();
500	list = tl_head->head;
501	tl_head->head = NULL;
502	tl_head->tail = &tl_head->head;
503	local_irq_enable();
504
505	while (list) {
506		struct tasklet_struct *t = list;
507
508		list = list->next;
509
510		if (tasklet_trylock(t)) {
511			if (!atomic_read(&t->count)) {
512				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
513							&t->state))
514					BUG();
515				t->func(t->data);
516				tasklet_unlock(t);
517				continue;
518			}
519			tasklet_unlock(t);
520		}
521
522		local_irq_disable();
523		t->next = NULL;
524		*tl_head->tail = t;
525		tl_head->tail = &t->next;
526		__raise_softirq_irqoff(softirq_nr);
527		local_irq_enable();
528	}
529}
530
531static __latent_entropy void tasklet_action(struct softirq_action *a)
532{
533	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
534}
535
536static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
537{
538	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
539}
540
541void tasklet_init(struct tasklet_struct *t,
542		  void (*func)(unsigned long), unsigned long data)
543{
544	t->next = NULL;
545	t->state = 0;
546	atomic_set(&t->count, 0);
547	t->func = func;
548	t->data = data;
549}
550EXPORT_SYMBOL(tasklet_init);
551
552void tasklet_kill(struct tasklet_struct *t)
553{
554	if (in_interrupt())
555		pr_notice("Attempt to kill tasklet from interrupt\n");
556
557	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
558		do {
559			yield();
560		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
561	}
562	tasklet_unlock_wait(t);
563	clear_bit(TASKLET_STATE_SCHED, &t->state);
564}
565EXPORT_SYMBOL(tasklet_kill);
566
567/*
568 * tasklet_hrtimer
569 */
570
571/*
572 * The trampoline is called when the hrtimer expires. It schedules a tasklet
573 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
574 * hrtimer callback, but from softirq context.
575 */
576static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
577{
578	struct tasklet_hrtimer *ttimer =
579		container_of(timer, struct tasklet_hrtimer, timer);
580
581	tasklet_hi_schedule(&ttimer->tasklet);
582	return HRTIMER_NORESTART;
583}
584
585/*
586 * Helper function which calls the hrtimer callback from
587 * tasklet/softirq context
588 */
589static void __tasklet_hrtimer_trampoline(unsigned long data)
590{
591	struct tasklet_hrtimer *ttimer = (void *)data;
592	enum hrtimer_restart restart;
593
594	restart = ttimer->function(&ttimer->timer);
595	if (restart != HRTIMER_NORESTART)
596		hrtimer_restart(&ttimer->timer);
597}
598
599/**
600 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
601 * @ttimer:	 tasklet_hrtimer which is initialized
602 * @function:	 hrtimer callback function which gets called from softirq context
603 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
604 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
605 */
606void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
607			  enum hrtimer_restart (*function)(struct hrtimer *),
608			  clockid_t which_clock, enum hrtimer_mode mode)
609{
610	hrtimer_init(&ttimer->timer, which_clock, mode);
611	ttimer->timer.function = __hrtimer_tasklet_trampoline;
612	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
613		     (unsigned long)ttimer);
614	ttimer->function = function;
615}
616EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
617
618void __init softirq_init(void)
619{
620	int cpu;
621
622	for_each_possible_cpu(cpu) {
623		per_cpu(tasklet_vec, cpu).tail =
624			&per_cpu(tasklet_vec, cpu).head;
625		per_cpu(tasklet_hi_vec, cpu).tail =
626			&per_cpu(tasklet_hi_vec, cpu).head;
627	}
628
629	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
630	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
631}
632
633static int ksoftirqd_should_run(unsigned int cpu)
634{
635	return local_softirq_pending();
636}
637
638static void run_ksoftirqd(unsigned int cpu)
639{
640	local_irq_disable();
641	if (local_softirq_pending()) {
642		/*
 643		 * We can safely run softirqs on the current stack, as we are
 644		 * not deep in the task stack here.
645		 */
646		__do_softirq();
647		local_irq_enable();
648		cond_resched();
649		return;
650	}
651	local_irq_enable();
652}
653
654#ifdef CONFIG_HOTPLUG_CPU
655/*
656 * tasklet_kill_immediate is called to remove a tasklet which can already be
657 * scheduled for execution on @cpu.
658 *
659 * Unlike tasklet_kill, this function removes the tasklet
660 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
661 *
662 * When this function is called, @cpu must be in the CPU_DEAD state.
663 */
664void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
665{
666	struct tasklet_struct **i;
667
668	BUG_ON(cpu_online(cpu));
669	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
670
671	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
672		return;
673
674	/* CPU is dead, so no lock needed. */
675	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
676		if (*i == t) {
677			*i = t->next;
678			/* If this was the tail element, move the tail ptr */
679			if (*i == NULL)
680				per_cpu(tasklet_vec, cpu).tail = i;
681			return;
682		}
683	}
684	BUG();
685}
686
687static int takeover_tasklets(unsigned int cpu)
688{
689	/* CPU is dead, so no lock needed. */
690	local_irq_disable();
691
692	/* Find end, append list for that CPU. */
693	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
694		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
695		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
696		per_cpu(tasklet_vec, cpu).head = NULL;
697		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
698	}
699	raise_softirq_irqoff(TASKLET_SOFTIRQ);
700
701	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
702		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
703		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
704		per_cpu(tasklet_hi_vec, cpu).head = NULL;
705		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
706	}
707	raise_softirq_irqoff(HI_SOFTIRQ);
708
709	local_irq_enable();
710	return 0;
711}
712#else
713#define takeover_tasklets	NULL
714#endif /* CONFIG_HOTPLUG_CPU */
715
716static struct smp_hotplug_thread softirq_threads = {
717	.store			= &ksoftirqd,
718	.thread_should_run	= ksoftirqd_should_run,
719	.thread_fn		= run_ksoftirqd,
720	.thread_comm		= "ksoftirqd/%u",
721};
722
723static __init int spawn_ksoftirqd(void)
724{
725	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
726				  takeover_tasklets);
727	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
728
729	return 0;
730}
731early_initcall(spawn_ksoftirqd);
732
733/*
734 * [ These __weak aliases are kept in a separate compilation unit, so that
735 *   GCC does not inline them incorrectly. ]
736 */
737
738int __init __weak early_irq_init(void)
739{
740	return 0;
741}
742
743int __init __weak arch_probe_nr_irqs(void)
744{
745	return NR_IRQS_LEGACY;
746}
747
748int __init __weak arch_early_irq_init(void)
749{
750	return 0;
751}
752
753unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
754{
755	return from;
756}