v3.15
 
  1/*
  2 *	linux/kernel/softirq.c
  3 *
  4 *	Copyright (C) 1992 Linus Torvalds
  5 *
  6 *	Distribute under GPLv2.
  7 *
  8 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/export.h>
 14#include <linux/kernel_stat.h>
 15#include <linux/interrupt.h>
 16#include <linux/init.h>
 17#include <linux/mm.h>
 18#include <linux/notifier.h>
 19#include <linux/percpu.h>
 20#include <linux/cpu.h>
 21#include <linux/freezer.h>
 22#include <linux/kthread.h>
 23#include <linux/rcupdate.h>
 24#include <linux/ftrace.h>
 25#include <linux/smp.h>
 26#include <linux/smpboot.h>
 27#include <linux/tick.h>
 28#include <linux/irq.h>
 29
 30#define CREATE_TRACE_POINTS
 31#include <trace/events/irq.h>
 32
 33/*
 34   - No shared variables, all the data are CPU local.
 35   - If a softirq needs serialization, let it serialize itself
 36     by its own spinlocks.
 37   - Even if softirq is serialized, only local cpu is marked for
 38     execution. Hence, we get something sort of weak cpu binding.
 39     Though it is still not clear, will it result in better locality
 40     or will not.
 41
 42   Examples:
 43   - NET RX softirq. It is multithreaded and does not require
 44     any global serialization.
 45   - NET TX softirq. It kicks software netdevice queues, hence
 46     it is logically serialized per device, but this serialization
 47     is invisible to common code.
 48   - Tasklets: serialized wrt itself.
 49 */
 50
 51#ifndef __ARCH_IRQ_STAT
 52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 53EXPORT_SYMBOL(irq_stat);
 54#endif
 55
 56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 57
 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 59
 60const char * const softirq_to_name[NR_SOFTIRQS] = {
 61	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 62	"TASKLET", "SCHED", "HRTIMER", "RCU"
 63};
 64
 65/*
 66 * we cannot loop indefinitely here to avoid userspace starvation,
 67 * but we also don't want to introduce a worst case 1/HZ latency
  68 * to the pending events, so let the scheduler balance
 69 * the softirq load for us.
 70 */
 71static void wakeup_softirqd(void)
 72{
 73	/* Interrupts are disabled: no need to stop preemption */
 74	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 75
 76	if (tsk && tsk->state != TASK_RUNNING)
 77		wake_up_process(tsk);
 78}
 79
 80/*
 81 * preempt_count and SOFTIRQ_OFFSET usage:
 82 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 83 *   softirq processing.
 84 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 85 *   on local_bh_disable or local_bh_enable.
 86 * This lets us distinguish between whether we are currently processing
 87 * softirq and whether we just have bh disabled.
 88 */
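/*
 * Illustrative sketch of what the two offsets buy us (not part of
 * softirq.c): the generic preempt-count helpers decode the same SOFTIRQ
 * bits. in_softirq() is non-zero whenever any SOFTIRQ_* count is held
 * (bh disabled or softirq running), while in_serving_softirq() only sees
 * the single SOFTIRQ_OFFSET that __do_softirq() adds around handler
 * execution. softirq_state_name() below is a hypothetical helper.
 */
#include <linux/hardirq.h>

static inline const char *softirq_state_name(void)
{
	if (in_serving_softirq())
		return "serving softirq";	/* __do_softirq() added SOFTIRQ_OFFSET */
	if (in_softirq())
		return "bottom halves disabled";	/* local_bh_disable() added SOFTIRQ_DISABLE_OFFSET */
	return "softirqs enabled";
}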
 89
 90/*
 91 * This one is for softirq.c-internal use,
 92 * where hardirqs are disabled legitimately:
 93 */
 94#ifdef CONFIG_TRACE_IRQFLAGS
 95void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 96{
 97	unsigned long flags;
 98
 99	WARN_ON_ONCE(in_irq());
100
101	raw_local_irq_save(flags);
102	/*
103	 * The preempt tracer hooks into preempt_count_add and will break
104	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
105	 * is set and before current->softirq_enabled is cleared.
106	 * We must manually increment preempt_count here and manually
107	 * call the trace_preempt_off later.
108	 */
109	__preempt_count_add(cnt);
110	/*
111	 * Were softirqs turned off above:
112	 */
113	if (softirq_count() == (cnt & SOFTIRQ_MASK))
114		trace_softirqs_off(ip);
115	raw_local_irq_restore(flags);
116
117	if (preempt_count() == cnt)
118		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
119}
120EXPORT_SYMBOL(__local_bh_disable_ip);
121#endif /* CONFIG_TRACE_IRQFLAGS */
122
123static void __local_bh_enable(unsigned int cnt)
124{
125	WARN_ON_ONCE(!irqs_disabled());
126
127	if (softirq_count() == (cnt & SOFTIRQ_MASK))
128		trace_softirqs_on(_RET_IP_);
129	preempt_count_sub(cnt);
130}
131
132/*
133 * Special-case - softirqs can safely be enabled in
134 * cond_resched_softirq(), or by __do_softirq(),
135 * without processing still-pending softirqs:
136 */
137void _local_bh_enable(void)
138{
139	WARN_ON_ONCE(in_irq());
140	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
141}
142EXPORT_SYMBOL(_local_bh_enable);
143
144void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
145{
146	WARN_ON_ONCE(in_irq() || irqs_disabled());
147#ifdef CONFIG_TRACE_IRQFLAGS
148	local_irq_disable();
149#endif
150	/*
151	 * Are softirqs going to be turned on now:
152	 */
153	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
154		trace_softirqs_on(ip);
155	/*
156	 * Keep preemption disabled until we are done with
157	 * softirq processing:
158	 */
159	preempt_count_sub(cnt - 1);
160
161	if (unlikely(!in_interrupt() && local_softirq_pending())) {
162		/*
163		 * Run softirq if any pending. And do it in its own stack
164		 * as we may be calling this deep in a task call stack already.
165		 */
166		do_softirq();
167	}
168
169	preempt_count_dec();
170#ifdef CONFIG_TRACE_IRQFLAGS
171	local_irq_enable();
172#endif
173	preempt_check_resched();
174}
175EXPORT_SYMBOL(__local_bh_enable_ip);
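/*
 * Typical use of the exported BH API above (hedged sketch; my_lock and
 * my_stats are hypothetical): a process-context path that shares data
 * with a softirq handler disables bottom halves around the critical
 * section so the handler cannot run on this CPU in the middle of it.
 * spin_lock_bh() combines the two steps.
 */
#include <linux/interrupt.h>
#include <linux/bottom_half.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_stats;

static void my_update_from_process_context(unsigned long delta)
{
	local_bh_disable();		/* softirqs off on this CPU */
	spin_lock(&my_lock);		/* keep other CPUs out */
	my_stats += delta;
	spin_unlock(&my_lock);
	local_bh_enable();		/* may service pending softirqs via do_softirq() */
}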
176
177/*
178 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
179 * but break the loop if need_resched() is set or after 2 ms.
180 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
181 * certain cases, such as stop_machine(), jiffies may cease to
182 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
183 * well to make sure we eventually return from this method.
184 *
185 * These limits have been established via experimentation.
 186 * The two things to balance are latency against fairness -
187 * we want to handle softirqs as soon as possible, but they
188 * should not be able to lock up the box.
189 */
190#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
191#define MAX_SOFTIRQ_RESTART 10
192
193#ifdef CONFIG_TRACE_IRQFLAGS
194/*
195 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
196 * to keep the lockdep irq context tracking as tight as possible in order to
 197 * not mis-qualify lock contexts and miss possible deadlocks.
198 */
199
200static inline bool lockdep_softirq_start(void)
201{
202	bool in_hardirq = false;
203
204	if (trace_hardirq_context(current)) {
205		in_hardirq = true;
206		trace_hardirq_exit();
207	}
208
209	lockdep_softirq_enter();
210
211	return in_hardirq;
212}
213
214static inline void lockdep_softirq_end(bool in_hardirq)
215{
216	lockdep_softirq_exit();
217
218	if (in_hardirq)
219		trace_hardirq_enter();
220}
221#else
222static inline bool lockdep_softirq_start(void) { return false; }
223static inline void lockdep_softirq_end(bool in_hardirq) { }
224#endif
225
226asmlinkage __visible void __do_softirq(void)
227{
228	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
229	unsigned long old_flags = current->flags;
230	int max_restart = MAX_SOFTIRQ_RESTART;
231	struct softirq_action *h;
232	bool in_hardirq;
233	__u32 pending;
234	int softirq_bit;
235	int cpu;
236
237	/*
 238	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
 239	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
 240	 * again if the socket is related to swapping.
241	 */
242	current->flags &= ~PF_MEMALLOC;
243
244	pending = local_softirq_pending();
245	account_irq_enter_time(current);
246
247	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
248	in_hardirq = lockdep_softirq_start();
249
250	cpu = smp_processor_id();
251restart:
252	/* Reset the pending bitmask before enabling irqs */
253	set_softirq_pending(0);
254
255	local_irq_enable();
256
257	h = softirq_vec;
258
259	while ((softirq_bit = ffs(pending))) {
260		unsigned int vec_nr;
261		int prev_count;
262
263		h += softirq_bit - 1;
264
265		vec_nr = h - softirq_vec;
266		prev_count = preempt_count();
267
268		kstat_incr_softirqs_this_cpu(vec_nr);
269
270		trace_softirq_entry(vec_nr);
271		h->action(h);
272		trace_softirq_exit(vec_nr);
273		if (unlikely(prev_count != preempt_count())) {
274			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
275			       vec_nr, softirq_to_name[vec_nr], h->action,
276			       prev_count, preempt_count());
277			preempt_count_set(prev_count);
278		}
279		rcu_bh_qs(cpu);
280		h++;
281		pending >>= softirq_bit;
282	}
283
284	local_irq_disable();
285
286	pending = local_softirq_pending();
287	if (pending) {
288		if (time_before(jiffies, end) && !need_resched() &&
289		    --max_restart)
290			goto restart;
291
292		wakeup_softirqd();
293	}
294
295	lockdep_softirq_end(in_hardirq);
296	account_irq_exit_time(current);
297	__local_bh_enable(SOFTIRQ_OFFSET);
298	WARN_ON_ONCE(in_interrupt());
299	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
300}
301
302asmlinkage __visible void do_softirq(void)
303{
304	__u32 pending;
305	unsigned long flags;
306
307	if (in_interrupt())
308		return;
309
310	local_irq_save(flags);
311
312	pending = local_softirq_pending();
313
314	if (pending)
315		do_softirq_own_stack();
316
317	local_irq_restore(flags);
318}
319
320/*
321 * Enter an interrupt context.
322 */
323void irq_enter(void)
324{
325	rcu_irq_enter();
326	if (is_idle_task(current) && !in_interrupt()) {
327		/*
328		 * Prevent raise_softirq from needlessly waking up ksoftirqd
329		 * here, as softirq will be serviced on return from interrupt.
330		 */
331		local_bh_disable();
332		tick_irq_enter();
333		_local_bh_enable();
334	}
335
336	__irq_enter();
337}
338
339static inline void invoke_softirq(void)
340{
341	if (!force_irqthreads) {
342#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
343		/*
344		 * We can safely execute softirq on the current stack if
345		 * it is the irq stack, because it should be near empty
346		 * at this stage.
347		 */
348		__do_softirq();
349#else
350		/*
351		 * Otherwise, irq_exit() is called on the task stack that can
352		 * be potentially deep already. So call softirq in its own stack
353		 * to prevent from any overrun.
354		 */
355		do_softirq_own_stack();
356#endif
357	} else {
358		wakeup_softirqd();
359	}
360}
361
362static inline void tick_irq_exit(void)
363{
364#ifdef CONFIG_NO_HZ_COMMON
365	int cpu = smp_processor_id();
366
367	/* Make sure that timer wheel updates are propagated */
368	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
369		if (!in_interrupt())
370			tick_nohz_irq_exit();
371	}
372#endif
373}
374
375/*
376 * Exit an interrupt context. Process softirqs if needed and possible:
377 */
378void irq_exit(void)
379{
380#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
381	local_irq_disable();
382#else
383	WARN_ON_ONCE(!irqs_disabled());
384#endif
385
386	account_irq_exit_time(current);
387	preempt_count_sub(HARDIRQ_OFFSET);
388	if (!in_interrupt() && local_softirq_pending())
389		invoke_softirq();
390
391	tick_irq_exit();
392	rcu_irq_exit();
393	trace_hardirq_exit(); /* must be last! */
394}
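/*
 * How the two halves above pair up (hedged sketch of a generic flow, not
 * lifted from any particular architecture): low-level entry code brackets
 * interrupt handling with irq_enter()/irq_exit(), and it is the irq_exit()
 * path that reaches invoke_softirq() when the handler raised a softirq.
 */
#include <linux/hardirq.h>
#include <linux/irqdesc.h>
#include <linux/interrupt.h>

static void example_dispatch_irq(unsigned int irq)
{
	irq_enter();			/* enter hardirq context, maybe tick_irq_enter() */
	generic_handle_irq(irq);	/* run the handler; it may raise_softirq() */
	irq_exit();			/* drop HARDIRQ_OFFSET, then run pending softirqs */
}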
395
396/*
397 * This function must run with irqs disabled!
398 */
399inline void raise_softirq_irqoff(unsigned int nr)
400{
401	__raise_softirq_irqoff(nr);
402
403	/*
404	 * If we're in an interrupt or softirq, we're done
405	 * (this also catches softirq-disabled code). We will
406	 * actually run the softirq once we return from
407	 * the irq or softirq.
408	 *
409	 * Otherwise we wake up ksoftirqd to make sure we
410	 * schedule the softirq soon.
411	 */
412	if (!in_interrupt())
413		wakeup_softirqd();
414}
415
416void raise_softirq(unsigned int nr)
417{
418	unsigned long flags;
419
420	local_irq_save(flags);
421	raise_softirq_irqoff(nr);
422	local_irq_restore(flags);
423}
424
425void __raise_softirq_irqoff(unsigned int nr)
426{
427	trace_softirq_raise(nr);
428	or_softirq_pending(1UL << nr);
429}
430
431void open_softirq(int nr, void (*action)(struct softirq_action *))
432{
433	softirq_vec[nr].action = action;
434}
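/*
 * Minimal sketch of the open_softirq()/raise_softirq() pairing. MY_SOFTIRQ
 * is a hypothetical entry in the NR_SOFTIRQS enum; new vectors are rarely
 * added in practice, and the tasklet machinery below is the usual way to
 * get deferred work into softirq context.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/printk.h>

static void my_softirq_action(struct softirq_action *h)
{
	/* softirq context, interrupts enabled, runs on the CPU that raised it */
	pr_debug("my softirq ran\n");
}

static int __init my_softirq_setup(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);	/* hypothetical vector number */
	return 0;
}

/* e.g. from a hardirq handler: mark pending, serviced on irq_exit() or by ksoftirqd */
static void my_softirq_kick(void)
{
	raise_softirq(MY_SOFTIRQ);
}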
435
436/*
437 * Tasklets
438 */
439struct tasklet_head {
440	struct tasklet_struct *head;
441	struct tasklet_struct **tail;
442};
443
444static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
445static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
446
447void __tasklet_schedule(struct tasklet_struct *t)
448{
449	unsigned long flags;
450
451	local_irq_save(flags);
452	t->next = NULL;
453	*__this_cpu_read(tasklet_vec.tail) = t;
454	__this_cpu_write(tasklet_vec.tail, &(t->next));
455	raise_softirq_irqoff(TASKLET_SOFTIRQ);
456	local_irq_restore(flags);
457}
458EXPORT_SYMBOL(__tasklet_schedule);
459
460void __tasklet_hi_schedule(struct tasklet_struct *t)
461{
462	unsigned long flags;
463
464	local_irq_save(flags);
465	t->next = NULL;
466	*__this_cpu_read(tasklet_hi_vec.tail) = t;
467	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
468	raise_softirq_irqoff(HI_SOFTIRQ);
469	local_irq_restore(flags);
470}
471EXPORT_SYMBOL(__tasklet_hi_schedule);
472
473void __tasklet_hi_schedule_first(struct tasklet_struct *t)
474{
475	BUG_ON(!irqs_disabled());
476
477	t->next = __this_cpu_read(tasklet_hi_vec.head);
478	__this_cpu_write(tasklet_hi_vec.head, t);
479	__raise_softirq_irqoff(HI_SOFTIRQ);
480}
481EXPORT_SYMBOL(__tasklet_hi_schedule_first);
482
483static void tasklet_action(struct softirq_action *a)
484{
485	struct tasklet_struct *list;
486
487	local_irq_disable();
488	list = __this_cpu_read(tasklet_vec.head);
489	__this_cpu_write(tasklet_vec.head, NULL);
490	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
491	local_irq_enable();
492
493	while (list) {
494		struct tasklet_struct *t = list;
495
496		list = list->next;
497
498		if (tasklet_trylock(t)) {
499			if (!atomic_read(&t->count)) {
500				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
501							&t->state))
502					BUG();
503				t->func(t->data);
504				tasklet_unlock(t);
505				continue;
506			}
507			tasklet_unlock(t);
508		}
509
510		local_irq_disable();
511		t->next = NULL;
512		*__this_cpu_read(tasklet_vec.tail) = t;
513		__this_cpu_write(tasklet_vec.tail, &(t->next));
514		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
515		local_irq_enable();
516	}
517}
518
519static void tasklet_hi_action(struct softirq_action *a)
520{
521	struct tasklet_struct *list;
522
523	local_irq_disable();
524	list = __this_cpu_read(tasklet_hi_vec.head);
525	__this_cpu_write(tasklet_hi_vec.head, NULL);
526	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
527	local_irq_enable();
528
529	while (list) {
530		struct tasklet_struct *t = list;
531
532		list = list->next;
533
534		if (tasklet_trylock(t)) {
535			if (!atomic_read(&t->count)) {
536				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
537							&t->state))
538					BUG();
539				t->func(t->data);
540				tasklet_unlock(t);
541				continue;
542			}
543			tasklet_unlock(t);
544		}
545
546		local_irq_disable();
547		t->next = NULL;
548		*__this_cpu_read(tasklet_hi_vec.tail) = t;
549		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
550		__raise_softirq_irqoff(HI_SOFTIRQ);
551		local_irq_enable();
552	}
553}
554
555void tasklet_init(struct tasklet_struct *t,
556		  void (*func)(unsigned long), unsigned long data)
557{
558	t->next = NULL;
559	t->state = 0;
560	atomic_set(&t->count, 0);
561	t->func = func;
562	t->data = data;
563}
564EXPORT_SYMBOL(tasklet_init);
565
566void tasklet_kill(struct tasklet_struct *t)
567{
568	if (in_interrupt())
569		pr_notice("Attempt to kill tasklet from interrupt\n");
570
571	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
572		do {
573			yield();
574		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
575	}
576	tasklet_unlock_wait(t);
577	clear_bit(TASKLET_STATE_SCHED, &t->state);
578}
579EXPORT_SYMBOL(tasklet_kill);
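/*
 * Putting the tasklet API above together (illustrative driver-style sketch;
 * struct my_dev and the my_* functions are hypothetical). This is the
 * classic func/data form matching tasklet_init() in this version.
 */
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct rx_tasklet;
	/* ... device state ... */
};

static void my_rx_tasklet_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* softirq context: do the deferred part of the rx path for dev */
	(void)dev;
}

static void my_dev_setup(struct my_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, my_rx_tasklet_fn, (unsigned long)dev);
}

static irqreturn_t my_irq_handler(int irq, void *cookie)
{
	struct my_dev *dev = cookie;

	/* hardirq context: ack the hardware, defer the heavy lifting */
	tasklet_schedule(&dev->rx_tasklet);
	return IRQ_HANDLED;
}

static void my_dev_teardown(struct my_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);	/* also waits for a running instance to finish */
}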
580
581/*
582 * tasklet_hrtimer
583 */
584
585/*
586 * The trampoline is called when the hrtimer expires. It schedules a tasklet
587 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
588 * hrtimer callback, but from softirq context.
589 */
590static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
591{
592	struct tasklet_hrtimer *ttimer =
593		container_of(timer, struct tasklet_hrtimer, timer);
594
595	tasklet_hi_schedule(&ttimer->tasklet);
596	return HRTIMER_NORESTART;
597}
598
599/*
600 * Helper function which calls the hrtimer callback from
601 * tasklet/softirq context
602 */
603static void __tasklet_hrtimer_trampoline(unsigned long data)
604{
605	struct tasklet_hrtimer *ttimer = (void *)data;
606	enum hrtimer_restart restart;
607
608	restart = ttimer->function(&ttimer->timer);
609	if (restart != HRTIMER_NORESTART)
610		hrtimer_restart(&ttimer->timer);
611}
612
613/**
614 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
615 * @ttimer:	 tasklet_hrtimer which is initialized
616 * @function:	 hrtimer callback function which gets called from softirq context
617 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
618 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
619 */
620void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
621			  enum hrtimer_restart (*function)(struct hrtimer *),
622			  clockid_t which_clock, enum hrtimer_mode mode)
623{
624	hrtimer_init(&ttimer->timer, which_clock, mode);
625	ttimer->timer.function = __hrtimer_tasklet_trampoline;
626	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
627		     (unsigned long)ttimer);
628	ttimer->function = function;
629}
630EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
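/*
 * Hedged usage sketch for the combo initialized above: arm it with
 * tasklet_hrtimer_start() (an inline helper in <linux/interrupt.h>), and
 * my_timeout_cb then runs from HI_SOFTIRQ context via the trampolines,
 * not from hard interrupt context. Names other than the API calls are
 * hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_ttimer;

static enum hrtimer_restart my_timeout_cb(struct hrtimer *timer)
{
	/* softirq context */
	return HRTIMER_NORESTART;
}

static void my_arm_timeout(void)
{
	tasklet_hrtimer_init(&my_ttimer, my_timeout_cb,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
}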
631
632void __init softirq_init(void)
633{
634	int cpu;
635
636	for_each_possible_cpu(cpu) {
637		per_cpu(tasklet_vec, cpu).tail =
638			&per_cpu(tasklet_vec, cpu).head;
639		per_cpu(tasklet_hi_vec, cpu).tail =
640			&per_cpu(tasklet_hi_vec, cpu).head;
641	}
642
643	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
644	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
645}
646
647static int ksoftirqd_should_run(unsigned int cpu)
648{
649	return local_softirq_pending();
650}
651
652static void run_ksoftirqd(unsigned int cpu)
653{
654	local_irq_disable();
655	if (local_softirq_pending()) {
656		/*
657		 * We can safely run softirq on inline stack, as we are not deep
658		 * in the task stack here.
659		 */
660		__do_softirq();
661		rcu_note_context_switch(cpu);
662		local_irq_enable();
663		cond_resched();
664		return;
665	}
666	local_irq_enable();
667}
668
669#ifdef CONFIG_HOTPLUG_CPU
670/*
671 * tasklet_kill_immediate is called to remove a tasklet which can already be
672 * scheduled for execution on @cpu.
673 *
674 * Unlike tasklet_kill, this function removes the tasklet
675 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
676 *
677 * When this function is called, @cpu must be in the CPU_DEAD state.
678 */
679void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
680{
681	struct tasklet_struct **i;
682
683	BUG_ON(cpu_online(cpu));
684	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
685
686	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
687		return;
688
689	/* CPU is dead, so no lock needed. */
690	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
691		if (*i == t) {
692			*i = t->next;
693			/* If this was the tail element, move the tail ptr */
694			if (*i == NULL)
695				per_cpu(tasklet_vec, cpu).tail = i;
696			return;
697		}
698	}
699	BUG();
700}
701
702static void takeover_tasklets(unsigned int cpu)
703{
704	/* CPU is dead, so no lock needed. */
705	local_irq_disable();
706
707	/* Find end, append list for that CPU. */
708	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
709		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
710		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
711		per_cpu(tasklet_vec, cpu).head = NULL;
712		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
713	}
714	raise_softirq_irqoff(TASKLET_SOFTIRQ);
715
716	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
717		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
718		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
719		per_cpu(tasklet_hi_vec, cpu).head = NULL;
720		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
721	}
722	raise_softirq_irqoff(HI_SOFTIRQ);
723
724	local_irq_enable();
725}
726#endif /* CONFIG_HOTPLUG_CPU */
727
728static int cpu_callback(struct notifier_block *nfb, unsigned long action,
729			void *hcpu)
730{
731	switch (action) {
732#ifdef CONFIG_HOTPLUG_CPU
733	case CPU_DEAD:
734	case CPU_DEAD_FROZEN:
735		takeover_tasklets((unsigned long)hcpu);
736		break;
737#endif /* CONFIG_HOTPLUG_CPU */
738	}
739	return NOTIFY_OK;
740}
741
742static struct notifier_block cpu_nfb = {
743	.notifier_call = cpu_callback
744};
745
746static struct smp_hotplug_thread softirq_threads = {
747	.store			= &ksoftirqd,
748	.thread_should_run	= ksoftirqd_should_run,
749	.thread_fn		= run_ksoftirqd,
750	.thread_comm		= "ksoftirqd/%u",
751};
752
753static __init int spawn_ksoftirqd(void)
754{
755	register_cpu_notifier(&cpu_nfb);
756
757	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
758
759	return 0;
760}
761early_initcall(spawn_ksoftirqd);
762
763/*
764 * [ These __weak aliases are kept in a separate compilation unit, so that
765 *   GCC does not inline them incorrectly. ]
766 */
767
768int __init __weak early_irq_init(void)
769{
770	return 0;
771}
772
773int __init __weak arch_probe_nr_irqs(void)
774{
775	return NR_IRQS_LEGACY;
776}
777
778int __init __weak arch_early_irq_init(void)
779{
780	return 0;
781}
782
783unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
784{
785	return from;
786}
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *	linux/kernel/softirq.c
  4 *
  5 *	Copyright (C) 1992 Linus Torvalds
  6 *
  7 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  8 */
  9
 10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 11
 12#include <linux/export.h>
 13#include <linux/kernel_stat.h>
 14#include <linux/interrupt.h>
 15#include <linux/init.h>
 16#include <linux/mm.h>
 17#include <linux/notifier.h>
 18#include <linux/percpu.h>
 19#include <linux/cpu.h>
 20#include <linux/freezer.h>
 21#include <linux/kthread.h>
 22#include <linux/rcupdate.h>
 23#include <linux/ftrace.h>
 24#include <linux/smp.h>
 25#include <linux/smpboot.h>
 26#include <linux/tick.h>
 27#include <linux/irq.h>
 28
 29#define CREATE_TRACE_POINTS
 30#include <trace/events/irq.h>
 31
 32/*
 33   - No shared variables, all the data are CPU local.
 34   - If a softirq needs serialization, let it serialize itself
 35     by its own spinlocks.
 36   - Even if softirq is serialized, only local cpu is marked for
 37     execution. Hence, we get something sort of weak cpu binding.
 38     Though it is still not clear, will it result in better locality
 39     or will not.
 40
 41   Examples:
 42   - NET RX softirq. It is multithreaded and does not require
 43     any global serialization.
 44   - NET TX softirq. It kicks software netdevice queues, hence
 45     it is logically serialized per device, but this serialization
 46     is invisible to common code.
 47   - Tasklets: serialized wrt itself.
 48 */
 49
 50#ifndef __ARCH_IRQ_STAT
 51DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
 52EXPORT_PER_CPU_SYMBOL(irq_stat);
 53#endif
 54
 55static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 56
 57DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 58
 59const char * const softirq_to_name[NR_SOFTIRQS] = {
 60	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 61	"TASKLET", "SCHED", "HRTIMER", "RCU"
 62};
 63
 64/*
 65 * we cannot loop indefinitely here to avoid userspace starvation,
 66 * but we also don't want to introduce a worst case 1/HZ latency
  67 * to the pending events, so let the scheduler balance
 68 * the softirq load for us.
 69 */
 70static void wakeup_softirqd(void)
 71{
 72	/* Interrupts are disabled: no need to stop preemption */
 73	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 74
 75	if (tsk && tsk->state != TASK_RUNNING)
 76		wake_up_process(tsk);
 77}
 78
 79/*
 80 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 81 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 82 * unless we're doing some of the synchronous softirqs.
 83 */
 84#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
 85static bool ksoftirqd_running(unsigned long pending)
 86{
 87	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 88
 89	if (pending & SOFTIRQ_NOW_MASK)
 90		return false;
 91	return tsk && (tsk->state == TASK_RUNNING) &&
 92		!__kthread_should_park(tsk);
 93}
 94
 95/*
 96 * preempt_count and SOFTIRQ_OFFSET usage:
 97 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 98 *   softirq processing.
 99 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
100 *   on local_bh_disable or local_bh_enable.
101 * This lets us distinguish between whether we are currently processing
102 * softirq and whether we just have bh disabled.
103 */
104
105/*
106 * This one is for softirq.c-internal use,
107 * where hardirqs are disabled legitimately:
108 */
109#ifdef CONFIG_TRACE_IRQFLAGS
110
111DEFINE_PER_CPU(int, hardirqs_enabled);
112DEFINE_PER_CPU(int, hardirq_context);
113EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
114EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
115
116void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
117{
118	unsigned long flags;
119
120	WARN_ON_ONCE(in_irq());
121
122	raw_local_irq_save(flags);
123	/*
124	 * The preempt tracer hooks into preempt_count_add and will break
125	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
126	 * is set and before current->softirq_enabled is cleared.
127	 * We must manually increment preempt_count here and manually
128	 * call the trace_preempt_off later.
129	 */
130	__preempt_count_add(cnt);
131	/*
132	 * Were softirqs turned off above:
133	 */
134	if (softirq_count() == (cnt & SOFTIRQ_MASK))
135		lockdep_softirqs_off(ip);
136	raw_local_irq_restore(flags);
137
138	if (preempt_count() == cnt) {
139#ifdef CONFIG_DEBUG_PREEMPT
140		current->preempt_disable_ip = get_lock_parent_ip();
141#endif
142		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
143	}
144}
145EXPORT_SYMBOL(__local_bh_disable_ip);
146#endif /* CONFIG_TRACE_IRQFLAGS */
147
148static void __local_bh_enable(unsigned int cnt)
149{
150	lockdep_assert_irqs_disabled();
151
152	if (preempt_count() == cnt)
153		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
154
155	if (softirq_count() == (cnt & SOFTIRQ_MASK))
156		lockdep_softirqs_on(_RET_IP_);
157
158	__preempt_count_sub(cnt);
159}
160
161/*
162 * Special-case - softirqs can safely be enabled by __do_softirq(),
163 * without processing still-pending softirqs:
164 */
165void _local_bh_enable(void)
166{
167	WARN_ON_ONCE(in_irq());
168	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
169}
170EXPORT_SYMBOL(_local_bh_enable);
171
172void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
173{
174	WARN_ON_ONCE(in_irq());
175	lockdep_assert_irqs_enabled();
176#ifdef CONFIG_TRACE_IRQFLAGS
177	local_irq_disable();
178#endif
179	/*
180	 * Are softirqs going to be turned on now:
181	 */
182	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
183		lockdep_softirqs_on(ip);
184	/*
185	 * Keep preemption disabled until we are done with
186	 * softirq processing:
187	 */
188	preempt_count_sub(cnt - 1);
189
190	if (unlikely(!in_interrupt() && local_softirq_pending())) {
191		/*
192		 * Run softirq if any pending. And do it in its own stack
193		 * as we may be calling this deep in a task call stack already.
194		 */
195		do_softirq();
196	}
197
198	preempt_count_dec();
199#ifdef CONFIG_TRACE_IRQFLAGS
200	local_irq_enable();
201#endif
202	preempt_check_resched();
203}
204EXPORT_SYMBOL(__local_bh_enable_ip);
205
206/*
207 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
208 * but break the loop if need_resched() is set or after 2 ms.
209 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
210 * certain cases, such as stop_machine(), jiffies may cease to
211 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
212 * well to make sure we eventually return from this method.
213 *
214 * These limits have been established via experimentation.
 215 * The two things to balance are latency against fairness -
216 * we want to handle softirqs as soon as possible, but they
217 * should not be able to lock up the box.
218 */
219#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
220#define MAX_SOFTIRQ_RESTART 10
221
222#ifdef CONFIG_TRACE_IRQFLAGS
223/*
224 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
225 * to keep the lockdep irq context tracking as tight as possible in order to
 226 * not mis-qualify lock contexts and miss possible deadlocks.
227 */
228
229static inline bool lockdep_softirq_start(void)
230{
231	bool in_hardirq = false;
232
233	if (lockdep_hardirq_context()) {
234		in_hardirq = true;
235		lockdep_hardirq_exit();
236	}
237
238	lockdep_softirq_enter();
239
240	return in_hardirq;
241}
242
243static inline void lockdep_softirq_end(bool in_hardirq)
244{
245	lockdep_softirq_exit();
246
247	if (in_hardirq)
248		lockdep_hardirq_enter();
249}
250#else
251static inline bool lockdep_softirq_start(void) { return false; }
252static inline void lockdep_softirq_end(bool in_hardirq) { }
253#endif
254
255asmlinkage __visible void __softirq_entry __do_softirq(void)
256{
257	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
258	unsigned long old_flags = current->flags;
259	int max_restart = MAX_SOFTIRQ_RESTART;
260	struct softirq_action *h;
261	bool in_hardirq;
262	__u32 pending;
263	int softirq_bit;
264
265	/*
266	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
267	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
268	 * again if the socket is related to swapping.
269	 */
270	current->flags &= ~PF_MEMALLOC;
271
272	pending = local_softirq_pending();
273	account_irq_enter_time(current);
274
275	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
276	in_hardirq = lockdep_softirq_start();
277
278restart:
279	/* Reset the pending bitmask before enabling irqs */
280	set_softirq_pending(0);
281
282	local_irq_enable();
283
284	h = softirq_vec;
285
286	while ((softirq_bit = ffs(pending))) {
287		unsigned int vec_nr;
288		int prev_count;
289
290		h += softirq_bit - 1;
291
292		vec_nr = h - softirq_vec;
293		prev_count = preempt_count();
294
295		kstat_incr_softirqs_this_cpu(vec_nr);
296
297		trace_softirq_entry(vec_nr);
298		h->action(h);
299		trace_softirq_exit(vec_nr);
300		if (unlikely(prev_count != preempt_count())) {
301			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
302			       vec_nr, softirq_to_name[vec_nr], h->action,
303			       prev_count, preempt_count());
304			preempt_count_set(prev_count);
305		}
306		h++;
307		pending >>= softirq_bit;
308	}
309
310	if (__this_cpu_read(ksoftirqd) == current)
311		rcu_softirq_qs();
312	local_irq_disable();
313
314	pending = local_softirq_pending();
315	if (pending) {
316		if (time_before(jiffies, end) && !need_resched() &&
317		    --max_restart)
318			goto restart;
319
320		wakeup_softirqd();
321	}
322
323	lockdep_softirq_end(in_hardirq);
324	account_irq_exit_time(current);
325	__local_bh_enable(SOFTIRQ_OFFSET);
326	WARN_ON_ONCE(in_interrupt());
327	current_restore_flags(old_flags, PF_MEMALLOC);
328}
329
330asmlinkage __visible void do_softirq(void)
331{
332	__u32 pending;
333	unsigned long flags;
334
335	if (in_interrupt())
336		return;
337
338	local_irq_save(flags);
339
340	pending = local_softirq_pending();
341
342	if (pending && !ksoftirqd_running(pending))
343		do_softirq_own_stack();
344
345	local_irq_restore(flags);
346}
347
348/**
349 * irq_enter_rcu - Enter an interrupt context with RCU watching
350 */
351void irq_enter_rcu(void)
352{
353	if (is_idle_task(current) && !in_interrupt()) {
354		/*
355		 * Prevent raise_softirq from needlessly waking up ksoftirqd
356		 * here, as softirq will be serviced on return from interrupt.
357		 */
358		local_bh_disable();
359		tick_irq_enter();
360		_local_bh_enable();
361	}
362	__irq_enter();
363}
364
365/**
366 * irq_enter - Enter an interrupt context including RCU update
367 */
368void irq_enter(void)
369{
370	rcu_irq_enter();
371	irq_enter_rcu();
372}
373
374static inline void invoke_softirq(void)
375{
376	if (ksoftirqd_running(local_softirq_pending()))
377		return;
378
379	if (!force_irqthreads) {
380#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
381		/*
382		 * We can safely execute softirq on the current stack if
383		 * it is the irq stack, because it should be near empty
384		 * at this stage.
385		 */
386		__do_softirq();
387#else
388		/*
389		 * Otherwise, irq_exit() is called on the task stack that can
390		 * be potentially deep already. So call softirq in its own stack
391		 * to prevent from any overrun.
392		 */
393		do_softirq_own_stack();
394#endif
395	} else {
396		wakeup_softirqd();
397	}
398}
399
400static inline void tick_irq_exit(void)
401{
402#ifdef CONFIG_NO_HZ_COMMON
403	int cpu = smp_processor_id();
404
405	/* Make sure that timer wheel updates are propagated */
406	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
407		if (!in_irq())
408			tick_nohz_irq_exit();
409	}
410#endif
411}
412
413static inline void __irq_exit_rcu(void)
414{
415#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
416	local_irq_disable();
417#else
418	lockdep_assert_irqs_disabled();
419#endif
420	account_irq_exit_time(current);
421	preempt_count_sub(HARDIRQ_OFFSET);
422	if (!in_interrupt() && local_softirq_pending())
423		invoke_softirq();
424
425	tick_irq_exit();
426}
427
428/**
429 * irq_exit_rcu() - Exit an interrupt context without updating RCU
430 *
431 * Also processes softirqs if needed and possible.
432 */
433void irq_exit_rcu(void)
434{
435	__irq_exit_rcu();
436	 /* must be last! */
437	lockdep_hardirq_exit();
438}
439
440/**
441 * irq_exit - Exit an interrupt context, update RCU and lockdep
442 *
443 * Also processes softirqs if needed and possible.
444 */
445void irq_exit(void)
446{
447	__irq_exit_rcu();
448	rcu_irq_exit();
449	 /* must be last! */
450	lockdep_hardirq_exit();
451}
452
453/*
454 * This function must run with irqs disabled!
455 */
456inline void raise_softirq_irqoff(unsigned int nr)
457{
458	__raise_softirq_irqoff(nr);
459
460	/*
461	 * If we're in an interrupt or softirq, we're done
462	 * (this also catches softirq-disabled code). We will
463	 * actually run the softirq once we return from
464	 * the irq or softirq.
465	 *
466	 * Otherwise we wake up ksoftirqd to make sure we
467	 * schedule the softirq soon.
468	 */
469	if (!in_interrupt())
470		wakeup_softirqd();
471}
472
473void raise_softirq(unsigned int nr)
474{
475	unsigned long flags;
476
477	local_irq_save(flags);
478	raise_softirq_irqoff(nr);
479	local_irq_restore(flags);
480}
481
482void __raise_softirq_irqoff(unsigned int nr)
483{
484	trace_softirq_raise(nr);
485	or_softirq_pending(1UL << nr);
486}
487
488void open_softirq(int nr, void (*action)(struct softirq_action *))
489{
490	softirq_vec[nr].action = action;
491}
492
493/*
494 * Tasklets
495 */
496struct tasklet_head {
497	struct tasklet_struct *head;
498	struct tasklet_struct **tail;
499};
500
501static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
502static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
503
504static void __tasklet_schedule_common(struct tasklet_struct *t,
505				      struct tasklet_head __percpu *headp,
506				      unsigned int softirq_nr)
507{
508	struct tasklet_head *head;
509	unsigned long flags;
510
511	local_irq_save(flags);
512	head = this_cpu_ptr(headp);
513	t->next = NULL;
514	*head->tail = t;
515	head->tail = &(t->next);
516	raise_softirq_irqoff(softirq_nr);
517	local_irq_restore(flags);
518}
519
520void __tasklet_schedule(struct tasklet_struct *t)
521{
522	__tasklet_schedule_common(t, &tasklet_vec,
523				  TASKLET_SOFTIRQ);
524}
525EXPORT_SYMBOL(__tasklet_schedule);
526
527void __tasklet_hi_schedule(struct tasklet_struct *t)
528{
529	__tasklet_schedule_common(t, &tasklet_hi_vec,
530				  HI_SOFTIRQ);
531}
532EXPORT_SYMBOL(__tasklet_hi_schedule);
533
534static void tasklet_action_common(struct softirq_action *a,
535				  struct tasklet_head *tl_head,
536				  unsigned int softirq_nr)
537{
538	struct tasklet_struct *list;
539
540	local_irq_disable();
541	list = tl_head->head;
542	tl_head->head = NULL;
543	tl_head->tail = &tl_head->head;
544	local_irq_enable();
545
546	while (list) {
547		struct tasklet_struct *t = list;
548
549		list = list->next;
550
551		if (tasklet_trylock(t)) {
552			if (!atomic_read(&t->count)) {
553				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
554							&t->state))
555					BUG();
556				if (t->use_callback)
557					t->callback(t);
558				else
559					t->func(t->data);
560				tasklet_unlock(t);
561				continue;
562			}
563			tasklet_unlock(t);
564		}
565
566		local_irq_disable();
567		t->next = NULL;
568		*tl_head->tail = t;
569		tl_head->tail = &t->next;
570		__raise_softirq_irqoff(softirq_nr);
571		local_irq_enable();
572	}
573}
574
575static __latent_entropy void tasklet_action(struct softirq_action *a)
576{
577	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
578}
579
580static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
581{
582	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
583}
584
585void tasklet_setup(struct tasklet_struct *t,
586		   void (*callback)(struct tasklet_struct *))
587{
588	t->next = NULL;
589	t->state = 0;
590	atomic_set(&t->count, 0);
591	t->callback = callback;
592	t->use_callback = true;
593	t->data = 0;
594}
595EXPORT_SYMBOL(tasklet_setup);
596
597void tasklet_init(struct tasklet_struct *t,
598		  void (*func)(unsigned long), unsigned long data)
599{
600	t->next = NULL;
601	t->state = 0;
602	atomic_set(&t->count, 0);
603	t->func = func;
604	t->use_callback = false;
605	t->data = data;
606}
607EXPORT_SYMBOL(tasklet_init);
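/*
 * Hedged sketch of the two init flavours above as a driver would use them
 * in this version: tasklet_setup() takes a callback that receives the
 * tasklet itself, and from_tasklet() (a container_of() helper added
 * alongside tasklet_setup()) recovers the enclosing object; tasklet_init()
 * keeps the legacy func/data form. struct my_dev is hypothetical.
 */
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct rx_tasklet;
};

static void my_rx_callback(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, rx_tasklet);

	/* softirq context; dev recovered without an unsigned long cast */
	(void)dev;
}

static void my_dev_setup(struct my_dev *dev)
{
	tasklet_setup(&dev->rx_tasklet, my_rx_callback);
	tasklet_schedule(&dev->rx_tasklet);
}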
608
609void tasklet_kill(struct tasklet_struct *t)
610{
611	if (in_interrupt())
612		pr_notice("Attempt to kill tasklet from interrupt\n");
613
614	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
615		do {
616			yield();
617		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
618	}
619	tasklet_unlock_wait(t);
620	clear_bit(TASKLET_STATE_SCHED, &t->state);
621}
622EXPORT_SYMBOL(tasklet_kill);
623
624void __init softirq_init(void)
625{
626	int cpu;
627
628	for_each_possible_cpu(cpu) {
629		per_cpu(tasklet_vec, cpu).tail =
630			&per_cpu(tasklet_vec, cpu).head;
631		per_cpu(tasklet_hi_vec, cpu).tail =
632			&per_cpu(tasklet_hi_vec, cpu).head;
633	}
634
635	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
636	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
637}
638
639static int ksoftirqd_should_run(unsigned int cpu)
640{
641	return local_softirq_pending();
642}
643
644static void run_ksoftirqd(unsigned int cpu)
645{
646	local_irq_disable();
647	if (local_softirq_pending()) {
648		/*
649		 * We can safely run softirq on inline stack, as we are not deep
650		 * in the task stack here.
651		 */
652		__do_softirq();
653		local_irq_enable();
654		cond_resched();
655		return;
656	}
657	local_irq_enable();
658}
659
660#ifdef CONFIG_HOTPLUG_CPU
661/*
662 * tasklet_kill_immediate is called to remove a tasklet which can already be
663 * scheduled for execution on @cpu.
664 *
665 * Unlike tasklet_kill, this function removes the tasklet
666 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
667 *
668 * When this function is called, @cpu must be in the CPU_DEAD state.
669 */
670void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
671{
672	struct tasklet_struct **i;
673
674	BUG_ON(cpu_online(cpu));
675	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
676
677	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
678		return;
679
680	/* CPU is dead, so no lock needed. */
681	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
682		if (*i == t) {
683			*i = t->next;
684			/* If this was the tail element, move the tail ptr */
685			if (*i == NULL)
686				per_cpu(tasklet_vec, cpu).tail = i;
687			return;
688		}
689	}
690	BUG();
691}
692
693static int takeover_tasklets(unsigned int cpu)
694{
695	/* CPU is dead, so no lock needed. */
696	local_irq_disable();
697
698	/* Find end, append list for that CPU. */
699	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
700		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
701		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
702		per_cpu(tasklet_vec, cpu).head = NULL;
703		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
704	}
705	raise_softirq_irqoff(TASKLET_SOFTIRQ);
706
707	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
708		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
709		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
710		per_cpu(tasklet_hi_vec, cpu).head = NULL;
711		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
712	}
713	raise_softirq_irqoff(HI_SOFTIRQ);
714
715	local_irq_enable();
716	return 0;
717}
718#else
719#define takeover_tasklets	NULL
720#endif /* CONFIG_HOTPLUG_CPU */
721
722static struct smp_hotplug_thread softirq_threads = {
723	.store			= &ksoftirqd,
724	.thread_should_run	= ksoftirqd_should_run,
725	.thread_fn		= run_ksoftirqd,
726	.thread_comm		= "ksoftirqd/%u",
727};
728
729static __init int spawn_ksoftirqd(void)
730{
731	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
732				  takeover_tasklets);
733	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
734
735	return 0;
736}
737early_initcall(spawn_ksoftirqd);
738
739/*
740 * [ These __weak aliases are kept in a separate compilation unit, so that
741 *   GCC does not inline them incorrectly. ]
742 */
743
744int __init __weak early_irq_init(void)
745{
746	return 0;
747}
748
749int __init __weak arch_probe_nr_irqs(void)
750{
751	return NR_IRQS_LEGACY;
752}
753
754int __init __weak arch_early_irq_init(void)
755{
756	return 0;
757}
758
759unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
760{
761	return from;
762}