// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get a sort of weak CPU binding, though it is
     still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

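/*
 * Illustrative worked example (editor's addition, not part of the original
 * file), assuming the usual layout where SOFTIRQ_OFFSET is 1 << SOFTIRQ_SHIFT:
 *
 *	local_bh_disable()	adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET),
 *				so softirq_count() becomes non-zero while the
 *				"currently in softirq" bit stays clear.
 *	__do_softirq()		adds only SOFTIRQ_OFFSET for the duration of
 *				handler execution, marking "processing softirq".
 *
 * in_softirq() tests the whole softirq field of preempt_count, while
 * in_serving_softirq() tests only the SOFTIRQ_OFFSET bit, which is how the
 * two states described above are told apart.
 */
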
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

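/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual pattern built on the helpers above. A driver sharing data
 * between process context and its softirq/tasklet handler would do roughly
 * the following; my_dev and my_dev->lock are hypothetical names.
 *
 *	local_bh_disable();		softirqs cannot run on this CPU
 *	spin_lock(&my_dev->lock);	also serialize against other CPUs
 *	...update shared state...
 *	spin_unlock(&my_dev->lock);
 *	local_bh_enable();		pending softirqs may now be run
 *
 * local_bh_disable()/local_bh_enable() are thin wrappers around
 * __local_bh_disable_ip()/__local_bh_enable_ip() with SOFTIRQ_DISABLE_OFFSET.
 */
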
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

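/*
 * Editor's note (illustrative, not from the original file): what these limits
 * work out to in practice. msecs_to_jiffies() rounds up, so with HZ=1000 the
 * time budget is 2 jiffies, while with HZ=250 or HZ=100 it degenerates to a
 * single jiffy. In every case the restart loop below also gives up after 10
 * passes or as soon as need_resched() becomes true, and any remaining work is
 * handed to ksoftirqd via wakeup_softirqd().
 */
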
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}
	__irq_enter();
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which may
		 * already be deep. So run the softirq on its own stack to
		 * prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

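/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the enter/exit pair above is typically used. Architecture interrupt
 * entry code brackets handler dispatch roughly like the hypothetical
 * function below, so that pending softirqs are run (or handed to ksoftirqd)
 * on the way out:
 *
 *	void arch_handle_external_interrupt(struct pt_regs *regs)
 *	{
 *		irq_enter();
 *		generic_handle_irq(hwirq_to_linux_irq(hwirq));
 *		irq_exit();
 *	}
 *
 * The exact entry path differs per architecture; the names above are
 * illustrative only.
 */
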
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

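/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the registration/raise flow built on the two helpers above, roughly what
 * the networking core does with its NET_TX vector:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);	at subsystem init
 *	...
 *	__raise_softirq_irqoff(NET_TX_SOFTIRQ);		from irq-disabled context
 *
 * The handler then runs from __do_softirq() on the CPU that raised it, with
 * bottom halves disabled. New softirq vectors are essentially never added;
 * new users are expected to use tasklets, timers or threaded irqs instead.
 */
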
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

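/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual lifecycle of the tasklet API defined above, as a driver might
 * use it. my_dev, dev->tl and my_tasklet_fn are hypothetical names.
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tl);
 *		...bottom-half work, bhs disabled on this CPU...
 *	}
 *
 *	tasklet_setup(&dev->tl, my_tasklet_fn);		at probe time
 *	tasklet_schedule(&dev->tl);			from the hard irq handler
 *	tasklet_kill(&dev->tl);				before freeing dev
 *
 * tasklet_schedule() only queues the tasklet once per TASKLET_STATE_SCHED
 * window, and tasklet_kill() must not be called from interrupt context, as
 * the pr_notice() above points out.
 */
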
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the current stack, as we are
		 * not deep in a task call stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}