Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/softirq.c
4 *
5 * Copyright (C) 1992 Linus Torvalds
6 *
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/export.h>
13#include <linux/kernel_stat.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/notifier.h>
18#include <linux/percpu.h>
19#include <linux/cpu.h>
20#include <linux/freezer.h>
21#include <linux/kthread.h>
22#include <linux/rcupdate.h>
23#include <linux/ftrace.h>
24#include <linux/smp.h>
25#include <linux/smpboot.h>
26#include <linux/tick.h>
27#include <linux/irq.h>
28
29#define CREATE_TRACE_POINTS
30#include <trace/events/irq.h>
31
32/*
33 - No shared variables, all the data are CPU local.
34 - If a softirq needs serialization, let it serialize itself
35 by its own spinlocks.
36 - Even if softirq is serialized, only local cpu is marked for
37 execution. Hence, we get something sort of weak cpu binding.
38 Though it is still not clear, will it result in better locality
39 or will not.
40
41 Examples:
42 - NET RX softirq. It is multithreaded and does not require
43 any global serialization.
44 - NET TX softirq. It kicks software netdevice queues, hence
45 it is logically serialized per device, but this serialization
46 is invisible to common code.
47 - Tasklets: serialized wrt itself.
48 */
49
50#ifndef __ARCH_IRQ_STAT
51DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
52EXPORT_PER_CPU_SYMBOL(irq_stat);
53#endif
54
55static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
56
57DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
58
59const char * const softirq_to_name[NR_SOFTIRQS] = {
60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
61 "TASKLET", "SCHED", "HRTIMER", "RCU"
62};
63
64/*
65 * we cannot loop indefinitely here to avoid userspace starvation,
66 * but we also don't want to introduce a worst case 1/HZ latency
67 * to the pending events, so lets the scheduler to balance
68 * the softirq load for us.
69 */
70static void wakeup_softirqd(void)
71{
72 /* Interrupts are disabled: no need to stop preemption */
73 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
74
75 if (tsk && tsk->state != TASK_RUNNING)
76 wake_up_process(tsk);
77}
78
79/*
80 * If ksoftirqd is scheduled, we do not want to process pending softirqs
81 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
82 * unless we're doing some of the synchronous softirqs.
83 */
84#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
85static bool ksoftirqd_running(unsigned long pending)
86{
87 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
88
89 if (pending & SOFTIRQ_NOW_MASK)
90 return false;
91 return tsk && (tsk->state == TASK_RUNNING) &&
92 !__kthread_should_park(tsk);
93}
94
95/*
96 * preempt_count and SOFTIRQ_OFFSET usage:
97 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
98 * softirq processing.
99 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
100 * on local_bh_disable or local_bh_enable.
101 * This lets us distinguish between whether we are currently processing
102 * softirq and whether we just have bh disabled.
103 */
104
105/*
106 * This one is for softirq.c-internal use,
107 * where hardirqs are disabled legitimately:
108 */
109#ifdef CONFIG_TRACE_IRQFLAGS
110void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
111{
112 unsigned long flags;
113
114 WARN_ON_ONCE(in_irq());
115
116 raw_local_irq_save(flags);
117 /*
118 * The preempt tracer hooks into preempt_count_add and will break
119 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
120 * is set and before current->softirq_enabled is cleared.
121 * We must manually increment preempt_count here and manually
122 * call the trace_preempt_off later.
123 */
124 __preempt_count_add(cnt);
125 /*
126 * Were softirqs turned off above:
127 */
128 if (softirq_count() == (cnt & SOFTIRQ_MASK))
129 trace_softirqs_off(ip);
130 raw_local_irq_restore(flags);
131
132 if (preempt_count() == cnt) {
133#ifdef CONFIG_DEBUG_PREEMPT
134 current->preempt_disable_ip = get_lock_parent_ip();
135#endif
136 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
137 }
138}
139EXPORT_SYMBOL(__local_bh_disable_ip);
140#endif /* CONFIG_TRACE_IRQFLAGS */
141
142static void __local_bh_enable(unsigned int cnt)
143{
144 lockdep_assert_irqs_disabled();
145
146 if (preempt_count() == cnt)
147 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
148
149 if (softirq_count() == (cnt & SOFTIRQ_MASK))
150 trace_softirqs_on(_RET_IP_);
151
152 __preempt_count_sub(cnt);
153}
154
155/*
156 * Special-case - softirqs can safely be enabled by __do_softirq(),
157 * without processing still-pending softirqs:
158 */
159void _local_bh_enable(void)
160{
161 WARN_ON_ONCE(in_irq());
162 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
163}
164EXPORT_SYMBOL(_local_bh_enable);
165
166void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
167{
168 WARN_ON_ONCE(in_irq());
169 lockdep_assert_irqs_enabled();
170#ifdef CONFIG_TRACE_IRQFLAGS
171 local_irq_disable();
172#endif
173 /*
174 * Are softirqs going to be turned on now:
175 */
176 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
177 trace_softirqs_on(ip);
178 /*
179 * Keep preemption disabled until we are done with
180 * softirq processing:
181 */
182 preempt_count_sub(cnt - 1);
183
184 if (unlikely(!in_interrupt() && local_softirq_pending())) {
185 /*
186 * Run softirq if any pending. And do it in its own stack
187 * as we may be calling this deep in a task call stack already.
188 */
189 do_softirq();
190 }
191
192 preempt_count_dec();
193#ifdef CONFIG_TRACE_IRQFLAGS
194 local_irq_enable();
195#endif
196 preempt_check_resched();
197}
198EXPORT_SYMBOL(__local_bh_enable_ip);
199
200/*
201 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
202 * but break the loop if need_resched() is set or after 2 ms.
203 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
204 * certain cases, such as stop_machine(), jiffies may cease to
205 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
206 * well to make sure we eventually return from this method.
207 *
208 * These limits have been established via experimentation.
209 * The two things to balance is latency against fairness -
210 * we want to handle softirqs as soon as possible, but they
211 * should not be able to lock up the box.
212 */
213#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
214#define MAX_SOFTIRQ_RESTART 10
215
216#ifdef CONFIG_TRACE_IRQFLAGS
217/*
218 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
219 * to keep the lockdep irq context tracking as tight as possible in order to
220 * not miss-qualify lock contexts and miss possible deadlocks.
221 */
222
223static inline bool lockdep_softirq_start(void)
224{
225 bool in_hardirq = false;
226
227 if (trace_hardirq_context(current)) {
228 in_hardirq = true;
229 trace_hardirq_exit();
230 }
231
232 lockdep_softirq_enter();
233
234 return in_hardirq;
235}
236
237static inline void lockdep_softirq_end(bool in_hardirq)
238{
239 lockdep_softirq_exit();
240
241 if (in_hardirq)
242 trace_hardirq_enter();
243}
244#else
245static inline bool lockdep_softirq_start(void) { return false; }
246static inline void lockdep_softirq_end(bool in_hardirq) { }
247#endif
248
249asmlinkage __visible void __softirq_entry __do_softirq(void)
250{
251 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
252 unsigned long old_flags = current->flags;
253 int max_restart = MAX_SOFTIRQ_RESTART;
254 struct softirq_action *h;
255 bool in_hardirq;
256 __u32 pending;
257 int softirq_bit;
258
259 /*
260 * Mask out PF_MEMALLOC as the current task context is borrowed for the
261 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
262 * again if the socket is related to swapping.
263 */
264 current->flags &= ~PF_MEMALLOC;
265
266 pending = local_softirq_pending();
267 account_irq_enter_time(current);
268
269 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
270 in_hardirq = lockdep_softirq_start();
271
272restart:
273 /* Reset the pending bitmask before enabling irqs */
274 set_softirq_pending(0);
275
276 local_irq_enable();
277
278 h = softirq_vec;
279
280 while ((softirq_bit = ffs(pending))) {
281 unsigned int vec_nr;
282 int prev_count;
283
284 h += softirq_bit - 1;
285
286 vec_nr = h - softirq_vec;
287 prev_count = preempt_count();
288
289 kstat_incr_softirqs_this_cpu(vec_nr);
290
291 trace_softirq_entry(vec_nr);
292 h->action(h);
293 trace_softirq_exit(vec_nr);
294 if (unlikely(prev_count != preempt_count())) {
295 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
296 vec_nr, softirq_to_name[vec_nr], h->action,
297 prev_count, preempt_count());
298 preempt_count_set(prev_count);
299 }
300 h++;
301 pending >>= softirq_bit;
302 }
303
304 if (__this_cpu_read(ksoftirqd) == current)
305 rcu_softirq_qs();
306 local_irq_disable();
307
308 pending = local_softirq_pending();
309 if (pending) {
310 if (time_before(jiffies, end) && !need_resched() &&
311 --max_restart)
312 goto restart;
313
314 wakeup_softirqd();
315 }
316
317 lockdep_softirq_end(in_hardirq);
318 account_irq_exit_time(current);
319 __local_bh_enable(SOFTIRQ_OFFSET);
320 WARN_ON_ONCE(in_interrupt());
321 current_restore_flags(old_flags, PF_MEMALLOC);
322}
323
324asmlinkage __visible void do_softirq(void)
325{
326 __u32 pending;
327 unsigned long flags;
328
329 if (in_interrupt())
330 return;
331
332 local_irq_save(flags);
333
334 pending = local_softirq_pending();
335
336 if (pending && !ksoftirqd_running(pending))
337 do_softirq_own_stack();
338
339 local_irq_restore(flags);
340}
341
342/*
343 * Enter an interrupt context.
344 */
345void irq_enter(void)
346{
347 rcu_irq_enter();
348 if (is_idle_task(current) && !in_interrupt()) {
349 /*
350 * Prevent raise_softirq from needlessly waking up ksoftirqd
351 * here, as softirq will be serviced on return from interrupt.
352 */
353 local_bh_disable();
354 tick_irq_enter();
355 _local_bh_enable();
356 }
357
358 __irq_enter();
359}
360
361static inline void invoke_softirq(void)
362{
363 if (ksoftirqd_running(local_softirq_pending()))
364 return;
365
366 if (!force_irqthreads) {
367#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
368 /*
369 * We can safely execute softirq on the current stack if
370 * it is the irq stack, because it should be near empty
371 * at this stage.
372 */
373 __do_softirq();
374#else
375 /*
376 * Otherwise, irq_exit() is called on the task stack that can
377 * be potentially deep already. So call softirq in its own stack
378 * to prevent from any overrun.
379 */
380 do_softirq_own_stack();
381#endif
382 } else {
383 wakeup_softirqd();
384 }
385}
386
387static inline void tick_irq_exit(void)
388{
389#ifdef CONFIG_NO_HZ_COMMON
390 int cpu = smp_processor_id();
391
392 /* Make sure that timer wheel updates are propagated */
393 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
394 if (!in_irq())
395 tick_nohz_irq_exit();
396 }
397#endif
398}
399
400/*
401 * Exit an interrupt context. Process softirqs if needed and possible:
402 */
403void irq_exit(void)
404{
405#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
406 local_irq_disable();
407#else
408 lockdep_assert_irqs_disabled();
409#endif
410 account_irq_exit_time(current);
411 preempt_count_sub(HARDIRQ_OFFSET);
412 if (!in_interrupt() && local_softirq_pending())
413 invoke_softirq();
414
415 tick_irq_exit();
416 rcu_irq_exit();
417 trace_hardirq_exit(); /* must be last! */
418}
419
420/*
421 * This function must run with irqs disabled!
422 */
423inline void raise_softirq_irqoff(unsigned int nr)
424{
425 __raise_softirq_irqoff(nr);
426
427 /*
428 * If we're in an interrupt or softirq, we're done
429 * (this also catches softirq-disabled code). We will
430 * actually run the softirq once we return from
431 * the irq or softirq.
432 *
433 * Otherwise we wake up ksoftirqd to make sure we
434 * schedule the softirq soon.
435 */
436 if (!in_interrupt())
437 wakeup_softirqd();
438}
439
440void raise_softirq(unsigned int nr)
441{
442 unsigned long flags;
443
444 local_irq_save(flags);
445 raise_softirq_irqoff(nr);
446 local_irq_restore(flags);
447}
448
449void __raise_softirq_irqoff(unsigned int nr)
450{
451 trace_softirq_raise(nr);
452 or_softirq_pending(1UL << nr);
453}
454
455void open_softirq(int nr, void (*action)(struct softirq_action *))
456{
457 softirq_vec[nr].action = action;
458}
459
460/*
461 * Tasklets
462 */
463struct tasklet_head {
464 struct tasklet_struct *head;
465 struct tasklet_struct **tail;
466};
467
468static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
469static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
470
471static void __tasklet_schedule_common(struct tasklet_struct *t,
472 struct tasklet_head __percpu *headp,
473 unsigned int softirq_nr)
474{
475 struct tasklet_head *head;
476 unsigned long flags;
477
478 local_irq_save(flags);
479 head = this_cpu_ptr(headp);
480 t->next = NULL;
481 *head->tail = t;
482 head->tail = &(t->next);
483 raise_softirq_irqoff(softirq_nr);
484 local_irq_restore(flags);
485}
486
487void __tasklet_schedule(struct tasklet_struct *t)
488{
489 __tasklet_schedule_common(t, &tasklet_vec,
490 TASKLET_SOFTIRQ);
491}
492EXPORT_SYMBOL(__tasklet_schedule);
493
494void __tasklet_hi_schedule(struct tasklet_struct *t)
495{
496 __tasklet_schedule_common(t, &tasklet_hi_vec,
497 HI_SOFTIRQ);
498}
499EXPORT_SYMBOL(__tasklet_hi_schedule);
500
501static void tasklet_action_common(struct softirq_action *a,
502 struct tasklet_head *tl_head,
503 unsigned int softirq_nr)
504{
505 struct tasklet_struct *list;
506
507 local_irq_disable();
508 list = tl_head->head;
509 tl_head->head = NULL;
510 tl_head->tail = &tl_head->head;
511 local_irq_enable();
512
513 while (list) {
514 struct tasklet_struct *t = list;
515
516 list = list->next;
517
518 if (tasklet_trylock(t)) {
519 if (!atomic_read(&t->count)) {
520 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
521 &t->state))
522 BUG();
523 t->func(t->data);
524 tasklet_unlock(t);
525 continue;
526 }
527 tasklet_unlock(t);
528 }
529
530 local_irq_disable();
531 t->next = NULL;
532 *tl_head->tail = t;
533 tl_head->tail = &t->next;
534 __raise_softirq_irqoff(softirq_nr);
535 local_irq_enable();
536 }
537}
538
539static __latent_entropy void tasklet_action(struct softirq_action *a)
540{
541 tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
542}
543
544static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
545{
546 tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
547}
548
549void tasklet_init(struct tasklet_struct *t,
550 void (*func)(unsigned long), unsigned long data)
551{
552 t->next = NULL;
553 t->state = 0;
554 atomic_set(&t->count, 0);
555 t->func = func;
556 t->data = data;
557}
558EXPORT_SYMBOL(tasklet_init);
559
560void tasklet_kill(struct tasklet_struct *t)
561{
562 if (in_interrupt())
563 pr_notice("Attempt to kill tasklet from interrupt\n");
564
565 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
566 do {
567 yield();
568 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
569 }
570 tasklet_unlock_wait(t);
571 clear_bit(TASKLET_STATE_SCHED, &t->state);
572}
573EXPORT_SYMBOL(tasklet_kill);
574
575void __init softirq_init(void)
576{
577 int cpu;
578
579 for_each_possible_cpu(cpu) {
580 per_cpu(tasklet_vec, cpu).tail =
581 &per_cpu(tasklet_vec, cpu).head;
582 per_cpu(tasklet_hi_vec, cpu).tail =
583 &per_cpu(tasklet_hi_vec, cpu).head;
584 }
585
586 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
587 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
588}
589
590static int ksoftirqd_should_run(unsigned int cpu)
591{
592 return local_softirq_pending();
593}
594
595static void run_ksoftirqd(unsigned int cpu)
596{
597 local_irq_disable();
598 if (local_softirq_pending()) {
599 /*
600 * We can safely run softirq on inline stack, as we are not deep
601 * in the task stack here.
602 */
603 __do_softirq();
604 local_irq_enable();
605 cond_resched();
606 return;
607 }
608 local_irq_enable();
609}
610
611#ifdef CONFIG_HOTPLUG_CPU
612/*
613 * tasklet_kill_immediate is called to remove a tasklet which can already be
614 * scheduled for execution on @cpu.
615 *
616 * Unlike tasklet_kill, this function removes the tasklet
617 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
618 *
619 * When this function is called, @cpu must be in the CPU_DEAD state.
620 */
621void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
622{
623 struct tasklet_struct **i;
624
625 BUG_ON(cpu_online(cpu));
626 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
627
628 if (!test_bit(TASKLET_STATE_SCHED, &t->state))
629 return;
630
631 /* CPU is dead, so no lock needed. */
632 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
633 if (*i == t) {
634 *i = t->next;
635 /* If this was the tail element, move the tail ptr */
636 if (*i == NULL)
637 per_cpu(tasklet_vec, cpu).tail = i;
638 return;
639 }
640 }
641 BUG();
642}
643
644static int takeover_tasklets(unsigned int cpu)
645{
646 /* CPU is dead, so no lock needed. */
647 local_irq_disable();
648
649 /* Find end, append list for that CPU. */
650 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
651 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
652 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
653 per_cpu(tasklet_vec, cpu).head = NULL;
654 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
655 }
656 raise_softirq_irqoff(TASKLET_SOFTIRQ);
657
658 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
659 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
660 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
661 per_cpu(tasklet_hi_vec, cpu).head = NULL;
662 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
663 }
664 raise_softirq_irqoff(HI_SOFTIRQ);
665
666 local_irq_enable();
667 return 0;
668}
669#else
670#define takeover_tasklets NULL
671#endif /* CONFIG_HOTPLUG_CPU */
672
673static struct smp_hotplug_thread softirq_threads = {
674 .store = &ksoftirqd,
675 .thread_should_run = ksoftirqd_should_run,
676 .thread_fn = run_ksoftirqd,
677 .thread_comm = "ksoftirqd/%u",
678};
679
680static __init int spawn_ksoftirqd(void)
681{
682 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
683 takeover_tasklets);
684 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
685
686 return 0;
687}
688early_initcall(spawn_ksoftirqd);
689
690/*
691 * [ These __weak aliases are kept in a separate compilation unit, so that
692 * GCC does not inline them incorrectly. ]
693 */
694
695int __init __weak early_irq_init(void)
696{
697 return 0;
698}
699
700int __init __weak arch_probe_nr_irqs(void)
701{
702 return NR_IRQS_LEGACY;
703}
704
705int __init __weak arch_early_irq_init(void)
706{
707 return 0;
708}
709
710unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
711{
712 return from;
713}
1/*
2 * linux/kernel/softirq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 *
6 * Distribute under GPLv2.
7 *
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9 *
10 * Remote softirq infrastructure is by Jens Axboe.
11 */
12
13#include <linux/export.h>
14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/tick.h>
27
28#define CREATE_TRACE_POINTS
29#include <trace/events/irq.h>
30
31#include <asm/irq.h>
32/*
33 - No shared variables, all the data are CPU local.
34 - If a softirq needs serialization, let it serialize itself
35 by its own spinlocks.
36 - Even if softirq is serialized, only local cpu is marked for
37 execution. Hence, we get something sort of weak cpu binding.
38 Though it is still not clear, will it result in better locality
39 or will not.
40
41 Examples:
42 - NET RX softirq. It is multithreaded and does not require
43 any global serialization.
44 - NET TX softirq. It kicks software netdevice queues, hence
45 it is logically serialized per device, but this serialization
46 is invisible to common code.
47 - Tasklets: serialized wrt itself.
48 */
49
50#ifndef __ARCH_IRQ_STAT
51irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
52EXPORT_SYMBOL(irq_stat);
53#endif
54
55static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
56
57DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
58
59char *softirq_to_name[NR_SOFTIRQS] = {
60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61 "TASKLET", "SCHED", "HRTIMER", "RCU"
62};
63
64/*
65 * we cannot loop indefinitely here to avoid userspace starvation,
66 * but we also don't want to introduce a worst case 1/HZ latency
67 * to the pending events, so lets the scheduler to balance
68 * the softirq load for us.
69 */
70static void wakeup_softirqd(void)
71{
72 /* Interrupts are disabled: no need to stop preemption */
73 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
74
75 if (tsk && tsk->state != TASK_RUNNING)
76 wake_up_process(tsk);
77}
78
79/*
80 * preempt_count and SOFTIRQ_OFFSET usage:
81 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
82 * softirq processing.
83 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
84 * on local_bh_disable or local_bh_enable.
85 * This lets us distinguish between whether we are currently processing
86 * softirq and whether we just have bh disabled.
87 */
88
89/*
90 * This one is for softirq.c-internal use,
91 * where hardirqs are disabled legitimately:
92 */
93#ifdef CONFIG_TRACE_IRQFLAGS
94static void __local_bh_disable(unsigned long ip, unsigned int cnt)
95{
96 unsigned long flags;
97
98 WARN_ON_ONCE(in_irq());
99
100 raw_local_irq_save(flags);
101 /*
102 * The preempt tracer hooks into add_preempt_count and will break
103 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
104 * is set and before current->softirq_enabled is cleared.
105 * We must manually increment preempt_count here and manually
106 * call the trace_preempt_off later.
107 */
108 preempt_count() += cnt;
109 /*
110 * Were softirqs turned off above:
111 */
112 if (softirq_count() == cnt)
113 trace_softirqs_off(ip);
114 raw_local_irq_restore(flags);
115
116 if (preempt_count() == cnt)
117 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
118}
119#else /* !CONFIG_TRACE_IRQFLAGS */
120static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
121{
122 add_preempt_count(cnt);
123 barrier();
124}
125#endif /* CONFIG_TRACE_IRQFLAGS */
126
127void local_bh_disable(void)
128{
129 __local_bh_disable((unsigned long)__builtin_return_address(0),
130 SOFTIRQ_DISABLE_OFFSET);
131}
132
133EXPORT_SYMBOL(local_bh_disable);
134
135static void __local_bh_enable(unsigned int cnt)
136{
137 WARN_ON_ONCE(in_irq());
138 WARN_ON_ONCE(!irqs_disabled());
139
140 if (softirq_count() == cnt)
141 trace_softirqs_on((unsigned long)__builtin_return_address(0));
142 sub_preempt_count(cnt);
143}
144
145/*
146 * Special-case - softirqs can safely be enabled in
147 * cond_resched_softirq(), or by __do_softirq(),
148 * without processing still-pending softirqs:
149 */
150void _local_bh_enable(void)
151{
152 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
153}
154
155EXPORT_SYMBOL(_local_bh_enable);
156
157static inline void _local_bh_enable_ip(unsigned long ip)
158{
159 WARN_ON_ONCE(in_irq() || irqs_disabled());
160#ifdef CONFIG_TRACE_IRQFLAGS
161 local_irq_disable();
162#endif
163 /*
164 * Are softirqs going to be turned on now:
165 */
166 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
167 trace_softirqs_on(ip);
168 /*
169 * Keep preemption disabled until we are done with
170 * softirq processing:
171 */
172 sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
173
174 if (unlikely(!in_interrupt() && local_softirq_pending()))
175 do_softirq();
176
177 dec_preempt_count();
178#ifdef CONFIG_TRACE_IRQFLAGS
179 local_irq_enable();
180#endif
181 preempt_check_resched();
182}
183
184void local_bh_enable(void)
185{
186 _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
187}
188EXPORT_SYMBOL(local_bh_enable);
189
190void local_bh_enable_ip(unsigned long ip)
191{
192 _local_bh_enable_ip(ip);
193}
194EXPORT_SYMBOL(local_bh_enable_ip);
195
196/*
197 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
198 * and we fall back to softirqd after that.
199 *
200 * This number has been established via experimentation.
201 * The two things to balance is latency against fairness -
202 * we want to handle softirqs as soon as possible, but they
203 * should not be able to lock up the box.
204 */
205#define MAX_SOFTIRQ_RESTART 10
206
207asmlinkage void __do_softirq(void)
208{
209 struct softirq_action *h;
210 __u32 pending;
211 int max_restart = MAX_SOFTIRQ_RESTART;
212 int cpu;
213
214 pending = local_softirq_pending();
215 account_system_vtime(current);
216
217 __local_bh_disable((unsigned long)__builtin_return_address(0),
218 SOFTIRQ_OFFSET);
219 lockdep_softirq_enter();
220
221 cpu = smp_processor_id();
222restart:
223 /* Reset the pending bitmask before enabling irqs */
224 set_softirq_pending(0);
225
226 local_irq_enable();
227
228 h = softirq_vec;
229
230 do {
231 if (pending & 1) {
232 unsigned int vec_nr = h - softirq_vec;
233 int prev_count = preempt_count();
234
235 kstat_incr_softirqs_this_cpu(vec_nr);
236
237 trace_softirq_entry(vec_nr);
238 h->action(h);
239 trace_softirq_exit(vec_nr);
240 if (unlikely(prev_count != preempt_count())) {
241 printk(KERN_ERR "huh, entered softirq %u %s %p"
242 "with preempt_count %08x,"
243 " exited with %08x?\n", vec_nr,
244 softirq_to_name[vec_nr], h->action,
245 prev_count, preempt_count());
246 preempt_count() = prev_count;
247 }
248
249 rcu_bh_qs(cpu);
250 }
251 h++;
252 pending >>= 1;
253 } while (pending);
254
255 local_irq_disable();
256
257 pending = local_softirq_pending();
258 if (pending && --max_restart)
259 goto restart;
260
261 if (pending)
262 wakeup_softirqd();
263
264 lockdep_softirq_exit();
265
266 account_system_vtime(current);
267 __local_bh_enable(SOFTIRQ_OFFSET);
268}
269
270#ifndef __ARCH_HAS_DO_SOFTIRQ
271
272asmlinkage void do_softirq(void)
273{
274 __u32 pending;
275 unsigned long flags;
276
277 if (in_interrupt())
278 return;
279
280 local_irq_save(flags);
281
282 pending = local_softirq_pending();
283
284 if (pending)
285 __do_softirq();
286
287 local_irq_restore(flags);
288}
289
290#endif
291
292/*
293 * Enter an interrupt context.
294 */
295void irq_enter(void)
296{
297 int cpu = smp_processor_id();
298
299 rcu_irq_enter();
300 if (is_idle_task(current) && !in_interrupt()) {
301 /*
302 * Prevent raise_softirq from needlessly waking up ksoftirqd
303 * here, as softirq will be serviced on return from interrupt.
304 */
305 local_bh_disable();
306 tick_check_idle(cpu);
307 _local_bh_enable();
308 }
309
310 __irq_enter();
311}
312
313static inline void invoke_softirq(void)
314{
315 if (!force_irqthreads) {
316#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
317 __do_softirq();
318#else
319 do_softirq();
320#endif
321 } else {
322 __local_bh_disable((unsigned long)__builtin_return_address(0),
323 SOFTIRQ_OFFSET);
324 wakeup_softirqd();
325 __local_bh_enable(SOFTIRQ_OFFSET);
326 }
327}
328
329/*
330 * Exit an interrupt context. Process softirqs if needed and possible:
331 */
332void irq_exit(void)
333{
334 account_system_vtime(current);
335 trace_hardirq_exit();
336 sub_preempt_count(IRQ_EXIT_OFFSET);
337 if (!in_interrupt() && local_softirq_pending())
338 invoke_softirq();
339
340#ifdef CONFIG_NO_HZ
341 /* Make sure that timer wheel updates are propagated */
342 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
343 tick_nohz_irq_exit();
344#endif
345 rcu_irq_exit();
346 sched_preempt_enable_no_resched();
347}
348
349/*
350 * This function must run with irqs disabled!
351 */
352inline void raise_softirq_irqoff(unsigned int nr)
353{
354 __raise_softirq_irqoff(nr);
355
356 /*
357 * If we're in an interrupt or softirq, we're done
358 * (this also catches softirq-disabled code). We will
359 * actually run the softirq once we return from
360 * the irq or softirq.
361 *
362 * Otherwise we wake up ksoftirqd to make sure we
363 * schedule the softirq soon.
364 */
365 if (!in_interrupt())
366 wakeup_softirqd();
367}
368
369void raise_softirq(unsigned int nr)
370{
371 unsigned long flags;
372
373 local_irq_save(flags);
374 raise_softirq_irqoff(nr);
375 local_irq_restore(flags);
376}
377
378void __raise_softirq_irqoff(unsigned int nr)
379{
380 trace_softirq_raise(nr);
381 or_softirq_pending(1UL << nr);
382}
383
384void open_softirq(int nr, void (*action)(struct softirq_action *))
385{
386 softirq_vec[nr].action = action;
387}
388
389/*
390 * Tasklets
391 */
392struct tasklet_head
393{
394 struct tasklet_struct *head;
395 struct tasklet_struct **tail;
396};
397
398static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
399static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
400
401void __tasklet_schedule(struct tasklet_struct *t)
402{
403 unsigned long flags;
404
405 local_irq_save(flags);
406 t->next = NULL;
407 *__this_cpu_read(tasklet_vec.tail) = t;
408 __this_cpu_write(tasklet_vec.tail, &(t->next));
409 raise_softirq_irqoff(TASKLET_SOFTIRQ);
410 local_irq_restore(flags);
411}
412
413EXPORT_SYMBOL(__tasklet_schedule);
414
415void __tasklet_hi_schedule(struct tasklet_struct *t)
416{
417 unsigned long flags;
418
419 local_irq_save(flags);
420 t->next = NULL;
421 *__this_cpu_read(tasklet_hi_vec.tail) = t;
422 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
423 raise_softirq_irqoff(HI_SOFTIRQ);
424 local_irq_restore(flags);
425}
426
427EXPORT_SYMBOL(__tasklet_hi_schedule);
428
429void __tasklet_hi_schedule_first(struct tasklet_struct *t)
430{
431 BUG_ON(!irqs_disabled());
432
433 t->next = __this_cpu_read(tasklet_hi_vec.head);
434 __this_cpu_write(tasklet_hi_vec.head, t);
435 __raise_softirq_irqoff(HI_SOFTIRQ);
436}
437
438EXPORT_SYMBOL(__tasklet_hi_schedule_first);
439
440static void tasklet_action(struct softirq_action *a)
441{
442 struct tasklet_struct *list;
443
444 local_irq_disable();
445 list = __this_cpu_read(tasklet_vec.head);
446 __this_cpu_write(tasklet_vec.head, NULL);
447 __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
448 local_irq_enable();
449
450 while (list) {
451 struct tasklet_struct *t = list;
452
453 list = list->next;
454
455 if (tasklet_trylock(t)) {
456 if (!atomic_read(&t->count)) {
457 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
458 BUG();
459 t->func(t->data);
460 tasklet_unlock(t);
461 continue;
462 }
463 tasklet_unlock(t);
464 }
465
466 local_irq_disable();
467 t->next = NULL;
468 *__this_cpu_read(tasklet_vec.tail) = t;
469 __this_cpu_write(tasklet_vec.tail, &(t->next));
470 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
471 local_irq_enable();
472 }
473}
474
475static void tasklet_hi_action(struct softirq_action *a)
476{
477 struct tasklet_struct *list;
478
479 local_irq_disable();
480 list = __this_cpu_read(tasklet_hi_vec.head);
481 __this_cpu_write(tasklet_hi_vec.head, NULL);
482 __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
483 local_irq_enable();
484
485 while (list) {
486 struct tasklet_struct *t = list;
487
488 list = list->next;
489
490 if (tasklet_trylock(t)) {
491 if (!atomic_read(&t->count)) {
492 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
493 BUG();
494 t->func(t->data);
495 tasklet_unlock(t);
496 continue;
497 }
498 tasklet_unlock(t);
499 }
500
501 local_irq_disable();
502 t->next = NULL;
503 *__this_cpu_read(tasklet_hi_vec.tail) = t;
504 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
505 __raise_softirq_irqoff(HI_SOFTIRQ);
506 local_irq_enable();
507 }
508}
509
510
511void tasklet_init(struct tasklet_struct *t,
512 void (*func)(unsigned long), unsigned long data)
513{
514 t->next = NULL;
515 t->state = 0;
516 atomic_set(&t->count, 0);
517 t->func = func;
518 t->data = data;
519}
520
521EXPORT_SYMBOL(tasklet_init);
522
523void tasklet_kill(struct tasklet_struct *t)
524{
525 if (in_interrupt())
526 printk("Attempt to kill tasklet from interrupt\n");
527
528 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
529 do {
530 yield();
531 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
532 }
533 tasklet_unlock_wait(t);
534 clear_bit(TASKLET_STATE_SCHED, &t->state);
535}
536
537EXPORT_SYMBOL(tasklet_kill);
538
539/*
540 * tasklet_hrtimer
541 */
542
543/*
544 * The trampoline is called when the hrtimer expires. It schedules a tasklet
545 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
546 * hrtimer callback, but from softirq context.
547 */
548static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
549{
550 struct tasklet_hrtimer *ttimer =
551 container_of(timer, struct tasklet_hrtimer, timer);
552
553 tasklet_hi_schedule(&ttimer->tasklet);
554 return HRTIMER_NORESTART;
555}
556
557/*
558 * Helper function which calls the hrtimer callback from
559 * tasklet/softirq context
560 */
561static void __tasklet_hrtimer_trampoline(unsigned long data)
562{
563 struct tasklet_hrtimer *ttimer = (void *)data;
564 enum hrtimer_restart restart;
565
566 restart = ttimer->function(&ttimer->timer);
567 if (restart != HRTIMER_NORESTART)
568 hrtimer_restart(&ttimer->timer);
569}
570
571/**
572 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
573 * @ttimer: tasklet_hrtimer which is initialized
574 * @function: hrtimer callback function which gets called from softirq context
575 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
576 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
577 */
578void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
579 enum hrtimer_restart (*function)(struct hrtimer *),
580 clockid_t which_clock, enum hrtimer_mode mode)
581{
582 hrtimer_init(&ttimer->timer, which_clock, mode);
583 ttimer->timer.function = __hrtimer_tasklet_trampoline;
584 tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
585 (unsigned long)ttimer);
586 ttimer->function = function;
587}
588EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
589
590/*
591 * Remote softirq bits
592 */
593
594DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
595EXPORT_PER_CPU_SYMBOL(softirq_work_list);
596
597static void __local_trigger(struct call_single_data *cp, int softirq)
598{
599 struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
600
601 list_add_tail(&cp->list, head);
602
603 /* Trigger the softirq only if the list was previously empty. */
604 if (head->next == &cp->list)
605 raise_softirq_irqoff(softirq);
606}
607
608#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
609static void remote_softirq_receive(void *data)
610{
611 struct call_single_data *cp = data;
612 unsigned long flags;
613 int softirq;
614
615 softirq = cp->priv;
616
617 local_irq_save(flags);
618 __local_trigger(cp, softirq);
619 local_irq_restore(flags);
620}
621
622static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
623{
624 if (cpu_online(cpu)) {
625 cp->func = remote_softirq_receive;
626 cp->info = cp;
627 cp->flags = 0;
628 cp->priv = softirq;
629
630 __smp_call_function_single(cpu, cp, 0);
631 return 0;
632 }
633 return 1;
634}
635#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
636static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
637{
638 return 1;
639}
640#endif
641
642/**
643 * __send_remote_softirq - try to schedule softirq work on a remote cpu
644 * @cp: private SMP call function data area
645 * @cpu: the remote cpu
646 * @this_cpu: the currently executing cpu
647 * @softirq: the softirq for the work
648 *
649 * Attempt to schedule softirq work on a remote cpu. If this cannot be
650 * done, the work is instead queued up on the local cpu.
651 *
652 * Interrupts must be disabled.
653 */
654void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
655{
656 if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
657 __local_trigger(cp, softirq);
658}
659EXPORT_SYMBOL(__send_remote_softirq);
660
661/**
662 * send_remote_softirq - try to schedule softirq work on a remote cpu
663 * @cp: private SMP call function data area
664 * @cpu: the remote cpu
665 * @softirq: the softirq for the work
666 *
667 * Like __send_remote_softirq except that disabling interrupts and
668 * computing the current cpu is done for the caller.
669 */
670void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
671{
672 unsigned long flags;
673 int this_cpu;
674
675 local_irq_save(flags);
676 this_cpu = smp_processor_id();
677 __send_remote_softirq(cp, cpu, this_cpu, softirq);
678 local_irq_restore(flags);
679}
680EXPORT_SYMBOL(send_remote_softirq);
681
682static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
683 unsigned long action, void *hcpu)
684{
685 /*
686 * If a CPU goes away, splice its entries to the current CPU
687 * and trigger a run of the softirq
688 */
689 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
690 int cpu = (unsigned long) hcpu;
691 int i;
692
693 local_irq_disable();
694 for (i = 0; i < NR_SOFTIRQS; i++) {
695 struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
696 struct list_head *local_head;
697
698 if (list_empty(head))
699 continue;
700
701 local_head = &__get_cpu_var(softirq_work_list[i]);
702 list_splice_init(head, local_head);
703 raise_softirq_irqoff(i);
704 }
705 local_irq_enable();
706 }
707
708 return NOTIFY_OK;
709}
710
711static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
712 .notifier_call = remote_softirq_cpu_notify,
713};
714
715void __init softirq_init(void)
716{
717 int cpu;
718
719 for_each_possible_cpu(cpu) {
720 int i;
721
722 per_cpu(tasklet_vec, cpu).tail =
723 &per_cpu(tasklet_vec, cpu).head;
724 per_cpu(tasklet_hi_vec, cpu).tail =
725 &per_cpu(tasklet_hi_vec, cpu).head;
726 for (i = 0; i < NR_SOFTIRQS; i++)
727 INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
728 }
729
730 register_hotcpu_notifier(&remote_softirq_cpu_notifier);
731
732 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
733 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
734}
735
736static int run_ksoftirqd(void * __bind_cpu)
737{
738 set_current_state(TASK_INTERRUPTIBLE);
739
740 while (!kthread_should_stop()) {
741 preempt_disable();
742 if (!local_softirq_pending()) {
743 schedule_preempt_disabled();
744 }
745
746 __set_current_state(TASK_RUNNING);
747
748 while (local_softirq_pending()) {
749 /* Preempt disable stops cpu going offline.
750 If already offline, we'll be on wrong CPU:
751 don't process */
752 if (cpu_is_offline((long)__bind_cpu))
753 goto wait_to_die;
754 local_irq_disable();
755 if (local_softirq_pending())
756 __do_softirq();
757 local_irq_enable();
758 sched_preempt_enable_no_resched();
759 cond_resched();
760 preempt_disable();
761 rcu_note_context_switch((long)__bind_cpu);
762 }
763 preempt_enable();
764 set_current_state(TASK_INTERRUPTIBLE);
765 }
766 __set_current_state(TASK_RUNNING);
767 return 0;
768
769wait_to_die:
770 preempt_enable();
771 /* Wait for kthread_stop */
772 set_current_state(TASK_INTERRUPTIBLE);
773 while (!kthread_should_stop()) {
774 schedule();
775 set_current_state(TASK_INTERRUPTIBLE);
776 }
777 __set_current_state(TASK_RUNNING);
778 return 0;
779}
780
781#ifdef CONFIG_HOTPLUG_CPU
782/*
783 * tasklet_kill_immediate is called to remove a tasklet which can already be
784 * scheduled for execution on @cpu.
785 *
786 * Unlike tasklet_kill, this function removes the tasklet
787 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
788 *
789 * When this function is called, @cpu must be in the CPU_DEAD state.
790 */
791void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
792{
793 struct tasklet_struct **i;
794
795 BUG_ON(cpu_online(cpu));
796 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
797
798 if (!test_bit(TASKLET_STATE_SCHED, &t->state))
799 return;
800
801 /* CPU is dead, so no lock needed. */
802 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
803 if (*i == t) {
804 *i = t->next;
805 /* If this was the tail element, move the tail ptr */
806 if (*i == NULL)
807 per_cpu(tasklet_vec, cpu).tail = i;
808 return;
809 }
810 }
811 BUG();
812}
813
814static void takeover_tasklets(unsigned int cpu)
815{
816 /* CPU is dead, so no lock needed. */
817 local_irq_disable();
818
819 /* Find end, append list for that CPU. */
820 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
821 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
822 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
823 per_cpu(tasklet_vec, cpu).head = NULL;
824 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
825 }
826 raise_softirq_irqoff(TASKLET_SOFTIRQ);
827
828 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
829 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
830 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
831 per_cpu(tasklet_hi_vec, cpu).head = NULL;
832 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
833 }
834 raise_softirq_irqoff(HI_SOFTIRQ);
835
836 local_irq_enable();
837}
838#endif /* CONFIG_HOTPLUG_CPU */
839
840static int __cpuinit cpu_callback(struct notifier_block *nfb,
841 unsigned long action,
842 void *hcpu)
843{
844 int hotcpu = (unsigned long)hcpu;
845 struct task_struct *p;
846
847 switch (action) {
848 case CPU_UP_PREPARE:
849 case CPU_UP_PREPARE_FROZEN:
850 p = kthread_create_on_node(run_ksoftirqd,
851 hcpu,
852 cpu_to_node(hotcpu),
853 "ksoftirqd/%d", hotcpu);
854 if (IS_ERR(p)) {
855 printk("ksoftirqd for %i failed\n", hotcpu);
856 return notifier_from_errno(PTR_ERR(p));
857 }
858 kthread_bind(p, hotcpu);
859 per_cpu(ksoftirqd, hotcpu) = p;
860 break;
861 case CPU_ONLINE:
862 case CPU_ONLINE_FROZEN:
863 wake_up_process(per_cpu(ksoftirqd, hotcpu));
864 break;
865#ifdef CONFIG_HOTPLUG_CPU
866 case CPU_UP_CANCELED:
867 case CPU_UP_CANCELED_FROZEN:
868 if (!per_cpu(ksoftirqd, hotcpu))
869 break;
870 /* Unbind so it can run. Fall thru. */
871 kthread_bind(per_cpu(ksoftirqd, hotcpu),
872 cpumask_any(cpu_online_mask));
873 case CPU_DEAD:
874 case CPU_DEAD_FROZEN: {
875 static const struct sched_param param = {
876 .sched_priority = MAX_RT_PRIO-1
877 };
878
879 p = per_cpu(ksoftirqd, hotcpu);
880 per_cpu(ksoftirqd, hotcpu) = NULL;
881 sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m);
882 kthread_stop(p);
883 takeover_tasklets(hotcpu);
884 break;
885 }
886#endif /* CONFIG_HOTPLUG_CPU */
887 }
888 return NOTIFY_OK;
889}
890
891static struct notifier_block __cpuinitdata cpu_nfb = {
892 .notifier_call = cpu_callback
893};
894
895static __init int spawn_ksoftirqd(void)
896{
897 void *cpu = (void *)(long)smp_processor_id();
898 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
899
900 BUG_ON(err != NOTIFY_OK);
901 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
902 register_cpu_notifier(&cpu_nfb);
903 return 0;
904}
905early_initcall(spawn_ksoftirqd);
906
907/*
908 * [ These __weak aliases are kept in a separate compilation unit, so that
909 * GCC does not inline them incorrectly. ]
910 */
911
912int __init __weak early_irq_init(void)
913{
914 return 0;
915}
916
917#ifdef CONFIG_GENERIC_HARDIRQS
918int __init __weak arch_probe_nr_irqs(void)
919{
920 return NR_IRQS_LEGACY;
921}
922
923int __init __weak arch_early_irq_init(void)
924{
925 return 0;
926}
927#endif