Loading...
1/*
2 * linux/kernel/softirq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 *
6 * Distribute under GPLv2.
7 *
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/export.h>
14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29
30#define CREATE_TRACE_POINTS
31#include <trace/events/irq.h>
32
33/*
34 - No shared variables, all the data are CPU local.
35 - If a softirq needs serialization, let it serialize itself
36 by its own spinlocks.
37 - Even if softirq is serialized, only local cpu is marked for
38 execution. Hence, we get something sort of weak cpu binding.
39 Though it is still not clear, will it result in better locality
40 or will not.
41
42 Examples:
43 - NET RX softirq. It is multithreaded and does not require
44 any global serialization.
45 - NET TX softirq. It kicks software netdevice queues, hence
46 it is logically serialized per device, but this serialization
47 is invisible to common code.
48 - Tasklets: serialized wrt itself.
49 */
50
51#ifndef __ARCH_IRQ_STAT
52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
53EXPORT_SYMBOL(irq_stat);
54#endif
55
56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
57
58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59
60const char * const softirq_to_name[NR_SOFTIRQS] = {
61 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62 "TASKLET", "SCHED", "HRTIMER", "RCU"
63};
64
65/*
66 * we cannot loop indefinitely here to avoid userspace starvation,
67 * but we also don't want to introduce a worst case 1/HZ latency
68 * to the pending events, so lets the scheduler to balance
69 * the softirq load for us.
70 */
71static void wakeup_softirqd(void)
72{
73 /* Interrupts are disabled: no need to stop preemption */
74 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
75
76 if (tsk && tsk->state != TASK_RUNNING)
77 wake_up_process(tsk);
78}
79
80/*
81 * preempt_count and SOFTIRQ_OFFSET usage:
82 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
83 * softirq processing.
84 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
85 * on local_bh_disable or local_bh_enable.
86 * This lets us distinguish between whether we are currently processing
87 * softirq and whether we just have bh disabled.
88 */
89
90/*
91 * This one is for softirq.c-internal use,
92 * where hardirqs are disabled legitimately:
93 */
94#ifdef CONFIG_TRACE_IRQFLAGS
95void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
96{
97 unsigned long flags;
98
99 WARN_ON_ONCE(in_irq());
100
101 raw_local_irq_save(flags);
102 /*
103 * The preempt tracer hooks into preempt_count_add and will break
104 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
105 * is set and before current->softirq_enabled is cleared.
106 * We must manually increment preempt_count here and manually
107 * call the trace_preempt_off later.
108 */
109 __preempt_count_add(cnt);
110 /*
111 * Were softirqs turned off above:
112 */
113 if (softirq_count() == (cnt & SOFTIRQ_MASK))
114 trace_softirqs_off(ip);
115 raw_local_irq_restore(flags);
116
117 if (preempt_count() == cnt)
118 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
119}
120EXPORT_SYMBOL(__local_bh_disable_ip);
121#endif /* CONFIG_TRACE_IRQFLAGS */
122
123static void __local_bh_enable(unsigned int cnt)
124{
125 WARN_ON_ONCE(!irqs_disabled());
126
127 if (softirq_count() == (cnt & SOFTIRQ_MASK))
128 trace_softirqs_on(_RET_IP_);
129 preempt_count_sub(cnt);
130}
131
132/*
133 * Special-case - softirqs can safely be enabled in
134 * cond_resched_softirq(), or by __do_softirq(),
135 * without processing still-pending softirqs:
136 */
137void _local_bh_enable(void)
138{
139 WARN_ON_ONCE(in_irq());
140 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
141}
142EXPORT_SYMBOL(_local_bh_enable);
143
144void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
145{
146 WARN_ON_ONCE(in_irq() || irqs_disabled());
147#ifdef CONFIG_TRACE_IRQFLAGS
148 local_irq_disable();
149#endif
150 /*
151 * Are softirqs going to be turned on now:
152 */
153 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
154 trace_softirqs_on(ip);
155 /*
156 * Keep preemption disabled until we are done with
157 * softirq processing:
158 */
159 preempt_count_sub(cnt - 1);
160
161 if (unlikely(!in_interrupt() && local_softirq_pending())) {
162 /*
163 * Run softirq if any pending. And do it in its own stack
164 * as we may be calling this deep in a task call stack already.
165 */
166 do_softirq();
167 }
168
169 preempt_count_dec();
170#ifdef CONFIG_TRACE_IRQFLAGS
171 local_irq_enable();
172#endif
173 preempt_check_resched();
174}
175EXPORT_SYMBOL(__local_bh_enable_ip);
176
177/*
178 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
179 * but break the loop if need_resched() is set or after 2 ms.
180 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
181 * certain cases, such as stop_machine(), jiffies may cease to
182 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
183 * well to make sure we eventually return from this method.
184 *
185 * These limits have been established via experimentation.
186 * The two things to balance is latency against fairness -
187 * we want to handle softirqs as soon as possible, but they
188 * should not be able to lock up the box.
189 */
190#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
191#define MAX_SOFTIRQ_RESTART 10
192
193#ifdef CONFIG_TRACE_IRQFLAGS
194/*
195 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
196 * to keep the lockdep irq context tracking as tight as possible in order to
197 * not miss-qualify lock contexts and miss possible deadlocks.
198 */
199
200static inline bool lockdep_softirq_start(void)
201{
202 bool in_hardirq = false;
203
204 if (trace_hardirq_context(current)) {
205 in_hardirq = true;
206 trace_hardirq_exit();
207 }
208
209 lockdep_softirq_enter();
210
211 return in_hardirq;
212}
213
214static inline void lockdep_softirq_end(bool in_hardirq)
215{
216 lockdep_softirq_exit();
217
218 if (in_hardirq)
219 trace_hardirq_enter();
220}
221#else
222static inline bool lockdep_softirq_start(void) { return false; }
223static inline void lockdep_softirq_end(bool in_hardirq) { }
224#endif
225
226asmlinkage __visible void __do_softirq(void)
227{
228 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
229 unsigned long old_flags = current->flags;
230 int max_restart = MAX_SOFTIRQ_RESTART;
231 struct softirq_action *h;
232 bool in_hardirq;
233 __u32 pending;
234 int softirq_bit;
235 int cpu;
236
237 /*
238 * Mask out PF_MEMALLOC s current task context is borrowed for the
239 * softirq. A softirq handled such as network RX might set PF_MEMALLOC
240 * again if the socket is related to swap
241 */
242 current->flags &= ~PF_MEMALLOC;
243
244 pending = local_softirq_pending();
245 account_irq_enter_time(current);
246
247 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
248 in_hardirq = lockdep_softirq_start();
249
250 cpu = smp_processor_id();
251restart:
252 /* Reset the pending bitmask before enabling irqs */
253 set_softirq_pending(0);
254
255 local_irq_enable();
256
257 h = softirq_vec;
258
259 while ((softirq_bit = ffs(pending))) {
260 unsigned int vec_nr;
261 int prev_count;
262
263 h += softirq_bit - 1;
264
265 vec_nr = h - softirq_vec;
266 prev_count = preempt_count();
267
268 kstat_incr_softirqs_this_cpu(vec_nr);
269
270 trace_softirq_entry(vec_nr);
271 h->action(h);
272 trace_softirq_exit(vec_nr);
273 if (unlikely(prev_count != preempt_count())) {
274 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
275 vec_nr, softirq_to_name[vec_nr], h->action,
276 prev_count, preempt_count());
277 preempt_count_set(prev_count);
278 }
279 rcu_bh_qs(cpu);
280 h++;
281 pending >>= softirq_bit;
282 }
283
284 local_irq_disable();
285
286 pending = local_softirq_pending();
287 if (pending) {
288 if (time_before(jiffies, end) && !need_resched() &&
289 --max_restart)
290 goto restart;
291
292 wakeup_softirqd();
293 }
294
295 lockdep_softirq_end(in_hardirq);
296 account_irq_exit_time(current);
297 __local_bh_enable(SOFTIRQ_OFFSET);
298 WARN_ON_ONCE(in_interrupt());
299 tsk_restore_flags(current, old_flags, PF_MEMALLOC);
300}
301
302asmlinkage __visible void do_softirq(void)
303{
304 __u32 pending;
305 unsigned long flags;
306
307 if (in_interrupt())
308 return;
309
310 local_irq_save(flags);
311
312 pending = local_softirq_pending();
313
314 if (pending)
315 do_softirq_own_stack();
316
317 local_irq_restore(flags);
318}
319
320/*
321 * Enter an interrupt context.
322 */
323void irq_enter(void)
324{
325 rcu_irq_enter();
326 if (is_idle_task(current) && !in_interrupt()) {
327 /*
328 * Prevent raise_softirq from needlessly waking up ksoftirqd
329 * here, as softirq will be serviced on return from interrupt.
330 */
331 local_bh_disable();
332 tick_irq_enter();
333 _local_bh_enable();
334 }
335
336 __irq_enter();
337}
338
339static inline void invoke_softirq(void)
340{
341 if (!force_irqthreads) {
342#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
343 /*
344 * We can safely execute softirq on the current stack if
345 * it is the irq stack, because it should be near empty
346 * at this stage.
347 */
348 __do_softirq();
349#else
350 /*
351 * Otherwise, irq_exit() is called on the task stack that can
352 * be potentially deep already. So call softirq in its own stack
353 * to prevent from any overrun.
354 */
355 do_softirq_own_stack();
356#endif
357 } else {
358 wakeup_softirqd();
359 }
360}
361
362static inline void tick_irq_exit(void)
363{
364#ifdef CONFIG_NO_HZ_COMMON
365 int cpu = smp_processor_id();
366
367 /* Make sure that timer wheel updates are propagated */
368 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
369 if (!in_interrupt())
370 tick_nohz_irq_exit();
371 }
372#endif
373}
374
375/*
376 * Exit an interrupt context. Process softirqs if needed and possible:
377 */
378void irq_exit(void)
379{
380#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
381 local_irq_disable();
382#else
383 WARN_ON_ONCE(!irqs_disabled());
384#endif
385
386 account_irq_exit_time(current);
387 preempt_count_sub(HARDIRQ_OFFSET);
388 if (!in_interrupt() && local_softirq_pending())
389 invoke_softirq();
390
391 tick_irq_exit();
392 rcu_irq_exit();
393 trace_hardirq_exit(); /* must be last! */
394}
395
396/*
397 * This function must run with irqs disabled!
398 */
399inline void raise_softirq_irqoff(unsigned int nr)
400{
401 __raise_softirq_irqoff(nr);
402
403 /*
404 * If we're in an interrupt or softirq, we're done
405 * (this also catches softirq-disabled code). We will
406 * actually run the softirq once we return from
407 * the irq or softirq.
408 *
409 * Otherwise we wake up ksoftirqd to make sure we
410 * schedule the softirq soon.
411 */
412 if (!in_interrupt())
413 wakeup_softirqd();
414}
415
416void raise_softirq(unsigned int nr)
417{
418 unsigned long flags;
419
420 local_irq_save(flags);
421 raise_softirq_irqoff(nr);
422 local_irq_restore(flags);
423}
424
425void __raise_softirq_irqoff(unsigned int nr)
426{
427 trace_softirq_raise(nr);
428 or_softirq_pending(1UL << nr);
429}
430
431void open_softirq(int nr, void (*action)(struct softirq_action *))
432{
433 softirq_vec[nr].action = action;
434}
435
436/*
437 * Tasklets
438 */
439struct tasklet_head {
440 struct tasklet_struct *head;
441 struct tasklet_struct **tail;
442};
443
444static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
445static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
446
447void __tasklet_schedule(struct tasklet_struct *t)
448{
449 unsigned long flags;
450
451 local_irq_save(flags);
452 t->next = NULL;
453 *__this_cpu_read(tasklet_vec.tail) = t;
454 __this_cpu_write(tasklet_vec.tail, &(t->next));
455 raise_softirq_irqoff(TASKLET_SOFTIRQ);
456 local_irq_restore(flags);
457}
458EXPORT_SYMBOL(__tasklet_schedule);
459
460void __tasklet_hi_schedule(struct tasklet_struct *t)
461{
462 unsigned long flags;
463
464 local_irq_save(flags);
465 t->next = NULL;
466 *__this_cpu_read(tasklet_hi_vec.tail) = t;
467 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
468 raise_softirq_irqoff(HI_SOFTIRQ);
469 local_irq_restore(flags);
470}
471EXPORT_SYMBOL(__tasklet_hi_schedule);
472
473void __tasklet_hi_schedule_first(struct tasklet_struct *t)
474{
475 BUG_ON(!irqs_disabled());
476
477 t->next = __this_cpu_read(tasklet_hi_vec.head);
478 __this_cpu_write(tasklet_hi_vec.head, t);
479 __raise_softirq_irqoff(HI_SOFTIRQ);
480}
481EXPORT_SYMBOL(__tasklet_hi_schedule_first);
482
483static void tasklet_action(struct softirq_action *a)
484{
485 struct tasklet_struct *list;
486
487 local_irq_disable();
488 list = __this_cpu_read(tasklet_vec.head);
489 __this_cpu_write(tasklet_vec.head, NULL);
490 __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
491 local_irq_enable();
492
493 while (list) {
494 struct tasklet_struct *t = list;
495
496 list = list->next;
497
498 if (tasklet_trylock(t)) {
499 if (!atomic_read(&t->count)) {
500 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
501 &t->state))
502 BUG();
503 t->func(t->data);
504 tasklet_unlock(t);
505 continue;
506 }
507 tasklet_unlock(t);
508 }
509
510 local_irq_disable();
511 t->next = NULL;
512 *__this_cpu_read(tasklet_vec.tail) = t;
513 __this_cpu_write(tasklet_vec.tail, &(t->next));
514 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
515 local_irq_enable();
516 }
517}
518
519static void tasklet_hi_action(struct softirq_action *a)
520{
521 struct tasklet_struct *list;
522
523 local_irq_disable();
524 list = __this_cpu_read(tasklet_hi_vec.head);
525 __this_cpu_write(tasklet_hi_vec.head, NULL);
526 __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
527 local_irq_enable();
528
529 while (list) {
530 struct tasklet_struct *t = list;
531
532 list = list->next;
533
534 if (tasklet_trylock(t)) {
535 if (!atomic_read(&t->count)) {
536 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
537 &t->state))
538 BUG();
539 t->func(t->data);
540 tasklet_unlock(t);
541 continue;
542 }
543 tasklet_unlock(t);
544 }
545
546 local_irq_disable();
547 t->next = NULL;
548 *__this_cpu_read(tasklet_hi_vec.tail) = t;
549 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
550 __raise_softirq_irqoff(HI_SOFTIRQ);
551 local_irq_enable();
552 }
553}
554
555void tasklet_init(struct tasklet_struct *t,
556 void (*func)(unsigned long), unsigned long data)
557{
558 t->next = NULL;
559 t->state = 0;
560 atomic_set(&t->count, 0);
561 t->func = func;
562 t->data = data;
563}
564EXPORT_SYMBOL(tasklet_init);
565
566void tasklet_kill(struct tasklet_struct *t)
567{
568 if (in_interrupt())
569 pr_notice("Attempt to kill tasklet from interrupt\n");
570
571 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
572 do {
573 yield();
574 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
575 }
576 tasklet_unlock_wait(t);
577 clear_bit(TASKLET_STATE_SCHED, &t->state);
578}
579EXPORT_SYMBOL(tasklet_kill);
580
581/*
582 * tasklet_hrtimer
583 */
584
585/*
586 * The trampoline is called when the hrtimer expires. It schedules a tasklet
587 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
588 * hrtimer callback, but from softirq context.
589 */
590static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
591{
592 struct tasklet_hrtimer *ttimer =
593 container_of(timer, struct tasklet_hrtimer, timer);
594
595 tasklet_hi_schedule(&ttimer->tasklet);
596 return HRTIMER_NORESTART;
597}
598
599/*
600 * Helper function which calls the hrtimer callback from
601 * tasklet/softirq context
602 */
603static void __tasklet_hrtimer_trampoline(unsigned long data)
604{
605 struct tasklet_hrtimer *ttimer = (void *)data;
606 enum hrtimer_restart restart;
607
608 restart = ttimer->function(&ttimer->timer);
609 if (restart != HRTIMER_NORESTART)
610 hrtimer_restart(&ttimer->timer);
611}
612
613/**
614 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
615 * @ttimer: tasklet_hrtimer which is initialized
616 * @function: hrtimer callback function which gets called from softirq context
617 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
618 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
619 */
620void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
621 enum hrtimer_restart (*function)(struct hrtimer *),
622 clockid_t which_clock, enum hrtimer_mode mode)
623{
624 hrtimer_init(&ttimer->timer, which_clock, mode);
625 ttimer->timer.function = __hrtimer_tasklet_trampoline;
626 tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
627 (unsigned long)ttimer);
628 ttimer->function = function;
629}
630EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
631
632void __init softirq_init(void)
633{
634 int cpu;
635
636 for_each_possible_cpu(cpu) {
637 per_cpu(tasklet_vec, cpu).tail =
638 &per_cpu(tasklet_vec, cpu).head;
639 per_cpu(tasklet_hi_vec, cpu).tail =
640 &per_cpu(tasklet_hi_vec, cpu).head;
641 }
642
643 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
644 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
645}
646
647static int ksoftirqd_should_run(unsigned int cpu)
648{
649 return local_softirq_pending();
650}
651
652static void run_ksoftirqd(unsigned int cpu)
653{
654 local_irq_disable();
655 if (local_softirq_pending()) {
656 /*
657 * We can safely run softirq on inline stack, as we are not deep
658 * in the task stack here.
659 */
660 __do_softirq();
661 rcu_note_context_switch(cpu);
662 local_irq_enable();
663 cond_resched();
664 return;
665 }
666 local_irq_enable();
667}
668
669#ifdef CONFIG_HOTPLUG_CPU
670/*
671 * tasklet_kill_immediate is called to remove a tasklet which can already be
672 * scheduled for execution on @cpu.
673 *
674 * Unlike tasklet_kill, this function removes the tasklet
675 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
676 *
677 * When this function is called, @cpu must be in the CPU_DEAD state.
678 */
679void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
680{
681 struct tasklet_struct **i;
682
683 BUG_ON(cpu_online(cpu));
684 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
685
686 if (!test_bit(TASKLET_STATE_SCHED, &t->state))
687 return;
688
689 /* CPU is dead, so no lock needed. */
690 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
691 if (*i == t) {
692 *i = t->next;
693 /* If this was the tail element, move the tail ptr */
694 if (*i == NULL)
695 per_cpu(tasklet_vec, cpu).tail = i;
696 return;
697 }
698 }
699 BUG();
700}
701
702static void takeover_tasklets(unsigned int cpu)
703{
704 /* CPU is dead, so no lock needed. */
705 local_irq_disable();
706
707 /* Find end, append list for that CPU. */
708 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
709 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
710 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
711 per_cpu(tasklet_vec, cpu).head = NULL;
712 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
713 }
714 raise_softirq_irqoff(TASKLET_SOFTIRQ);
715
716 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
717 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
718 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
719 per_cpu(tasklet_hi_vec, cpu).head = NULL;
720 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
721 }
722 raise_softirq_irqoff(HI_SOFTIRQ);
723
724 local_irq_enable();
725}
726#endif /* CONFIG_HOTPLUG_CPU */
727
728static int cpu_callback(struct notifier_block *nfb, unsigned long action,
729 void *hcpu)
730{
731 switch (action) {
732#ifdef CONFIG_HOTPLUG_CPU
733 case CPU_DEAD:
734 case CPU_DEAD_FROZEN:
735 takeover_tasklets((unsigned long)hcpu);
736 break;
737#endif /* CONFIG_HOTPLUG_CPU */
738 }
739 return NOTIFY_OK;
740}
741
742static struct notifier_block cpu_nfb = {
743 .notifier_call = cpu_callback
744};
745
746static struct smp_hotplug_thread softirq_threads = {
747 .store = &ksoftirqd,
748 .thread_should_run = ksoftirqd_should_run,
749 .thread_fn = run_ksoftirqd,
750 .thread_comm = "ksoftirqd/%u",
751};
752
753static __init int spawn_ksoftirqd(void)
754{
755 register_cpu_notifier(&cpu_nfb);
756
757 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
758
759 return 0;
760}
761early_initcall(spawn_ksoftirqd);
762
763/*
764 * [ These __weak aliases are kept in a separate compilation unit, so that
765 * GCC does not inline them incorrectly. ]
766 */
767
768int __init __weak early_irq_init(void)
769{
770 return 0;
771}
772
773int __init __weak arch_probe_nr_irqs(void)
774{
775 return NR_IRQS_LEGACY;
776}
777
778int __init __weak arch_early_irq_init(void)
779{
780 return 0;
781}
782
783unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
784{
785 return from;
786}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/softirq.c
4 *
5 * Copyright (C) 1992 Linus Torvalds
6 *
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/export.h>
13#include <linux/kernel_stat.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/local_lock.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29#include <linux/wait_bit.h>
30
31#include <asm/softirq_stack.h>
32
33#define CREATE_TRACE_POINTS
34#include <trace/events/irq.h>
35
36/*
37 - No shared variables, all the data are CPU local.
38 - If a softirq needs serialization, let it serialize itself
39 by its own spinlocks.
40 - Even if softirq is serialized, only local cpu is marked for
41 execution. Hence, we get something sort of weak cpu binding.
42 Though it is still not clear, will it result in better locality
43 or will not.
44
45 Examples:
46 - NET RX softirq. It is multithreaded and does not require
47 any global serialization.
48 - NET TX softirq. It kicks software netdevice queues, hence
49 it is logically serialized per device, but this serialization
50 is invisible to common code.
51 - Tasklets: serialized wrt itself.
52 */
53
54#ifndef __ARCH_IRQ_STAT
55DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
56EXPORT_PER_CPU_SYMBOL(irq_stat);
57#endif
58
59static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
60
61DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62
63const char * const softirq_to_name[NR_SOFTIRQS] = {
64 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
65 "TASKLET", "SCHED", "HRTIMER", "RCU"
66};
67
68/*
69 * we cannot loop indefinitely here to avoid userspace starvation,
70 * but we also don't want to introduce a worst case 1/HZ latency
71 * to the pending events, so lets the scheduler to balance
72 * the softirq load for us.
73 */
74static void wakeup_softirqd(void)
75{
76 /* Interrupts are disabled: no need to stop preemption */
77 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
78
79 if (tsk)
80 wake_up_process(tsk);
81}
82
83#ifdef CONFIG_TRACE_IRQFLAGS
84DEFINE_PER_CPU(int, hardirqs_enabled);
85DEFINE_PER_CPU(int, hardirq_context);
86EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
87EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
88#endif
89
90/*
91 * SOFTIRQ_OFFSET usage:
92 *
93 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
94 * to a per CPU counter and to task::softirqs_disabled_cnt.
95 *
96 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
97 * processing.
98 *
99 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
100 * on local_bh_disable or local_bh_enable.
101 *
102 * This lets us distinguish between whether we are currently processing
103 * softirq and whether we just have bh disabled.
104 */
105#ifdef CONFIG_PREEMPT_RT
106
107/*
108 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
109 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
110 * softirq disabled section to be preempted.
111 *
112 * The per task counter is used for softirq_count(), in_softirq() and
113 * in_serving_softirqs() because these counts are only valid when the task
114 * holding softirq_ctrl::lock is running.
115 *
116 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
117 * the task which is in a softirq disabled section is preempted or blocks.
118 */
119struct softirq_ctrl {
120 local_lock_t lock;
121 int cnt;
122};
123
124static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
125 .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
126};
127
128/**
129 * local_bh_blocked() - Check for idle whether BH processing is blocked
130 *
131 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
132 *
133 * This is invoked from the idle task to guard against false positive
134 * softirq pending warnings, which would happen when the task which holds
135 * softirq_ctrl::lock was the only running task on the CPU and blocks on
136 * some other lock.
137 */
138bool local_bh_blocked(void)
139{
140 return __this_cpu_read(softirq_ctrl.cnt) != 0;
141}
142
143void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
144{
145 unsigned long flags;
146 int newcnt;
147
148 WARN_ON_ONCE(in_hardirq());
149
150 /* First entry of a task into a BH disabled section? */
151 if (!current->softirq_disable_cnt) {
152 if (preemptible()) {
153 local_lock(&softirq_ctrl.lock);
154 /* Required to meet the RCU bottomhalf requirements. */
155 rcu_read_lock();
156 } else {
157 DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
158 }
159 }
160
161 /*
162 * Track the per CPU softirq disabled state. On RT this is per CPU
163 * state to allow preemption of bottom half disabled sections.
164 */
165 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
166 /*
167 * Reflect the result in the task state to prevent recursion on the
168 * local lock and to make softirq_count() & al work.
169 */
170 current->softirq_disable_cnt = newcnt;
171
172 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
173 raw_local_irq_save(flags);
174 lockdep_softirqs_off(ip);
175 raw_local_irq_restore(flags);
176 }
177}
178EXPORT_SYMBOL(__local_bh_disable_ip);
179
180static void __local_bh_enable(unsigned int cnt, bool unlock)
181{
182 unsigned long flags;
183 int newcnt;
184
185 DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
186 this_cpu_read(softirq_ctrl.cnt));
187
188 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
189 raw_local_irq_save(flags);
190 lockdep_softirqs_on(_RET_IP_);
191 raw_local_irq_restore(flags);
192 }
193
194 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
195 current->softirq_disable_cnt = newcnt;
196
197 if (!newcnt && unlock) {
198 rcu_read_unlock();
199 local_unlock(&softirq_ctrl.lock);
200 }
201}
202
203void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
204{
205 bool preempt_on = preemptible();
206 unsigned long flags;
207 u32 pending;
208 int curcnt;
209
210 WARN_ON_ONCE(in_hardirq());
211 lockdep_assert_irqs_enabled();
212
213 local_irq_save(flags);
214 curcnt = __this_cpu_read(softirq_ctrl.cnt);
215
216 /*
217 * If this is not reenabling soft interrupts, no point in trying to
218 * run pending ones.
219 */
220 if (curcnt != cnt)
221 goto out;
222
223 pending = local_softirq_pending();
224 if (!pending)
225 goto out;
226
227 /*
228 * If this was called from non preemptible context, wake up the
229 * softirq daemon.
230 */
231 if (!preempt_on) {
232 wakeup_softirqd();
233 goto out;
234 }
235
236 /*
237 * Adjust softirq count to SOFTIRQ_OFFSET which makes
238 * in_serving_softirq() become true.
239 */
240 cnt = SOFTIRQ_OFFSET;
241 __local_bh_enable(cnt, false);
242 __do_softirq();
243
244out:
245 __local_bh_enable(cnt, preempt_on);
246 local_irq_restore(flags);
247}
248EXPORT_SYMBOL(__local_bh_enable_ip);
249
250/*
251 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
252 * to acquire the per CPU local lock for reentrancy protection.
253 */
254static inline void ksoftirqd_run_begin(void)
255{
256 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
257 local_irq_disable();
258}
259
260/* Counterpart to ksoftirqd_run_begin() */
261static inline void ksoftirqd_run_end(void)
262{
263 __local_bh_enable(SOFTIRQ_OFFSET, true);
264 WARN_ON_ONCE(in_interrupt());
265 local_irq_enable();
266}
267
268static inline void softirq_handle_begin(void) { }
269static inline void softirq_handle_end(void) { }
270
271static inline bool should_wake_ksoftirqd(void)
272{
273 return !this_cpu_read(softirq_ctrl.cnt);
274}
275
276static inline void invoke_softirq(void)
277{
278 if (should_wake_ksoftirqd())
279 wakeup_softirqd();
280}
281
282/*
283 * flush_smp_call_function_queue() can raise a soft interrupt in a function
284 * call. On RT kernels this is undesired and the only known functionality
285 * in the block layer which does this is disabled on RT. If soft interrupts
286 * get raised which haven't been raised before the flush, warn so it can be
287 * investigated.
288 */
289void do_softirq_post_smp_call_flush(unsigned int was_pending)
290{
291 if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
292 invoke_softirq();
293}
294
295#else /* CONFIG_PREEMPT_RT */
296
297/*
298 * This one is for softirq.c-internal use, where hardirqs are disabled
299 * legitimately:
300 */
301#ifdef CONFIG_TRACE_IRQFLAGS
302void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
303{
304 unsigned long flags;
305
306 WARN_ON_ONCE(in_hardirq());
307
308 raw_local_irq_save(flags);
309 /*
310 * The preempt tracer hooks into preempt_count_add and will break
311 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
312 * is set and before current->softirq_enabled is cleared.
313 * We must manually increment preempt_count here and manually
314 * call the trace_preempt_off later.
315 */
316 __preempt_count_add(cnt);
317 /*
318 * Were softirqs turned off above:
319 */
320 if (softirq_count() == (cnt & SOFTIRQ_MASK))
321 lockdep_softirqs_off(ip);
322 raw_local_irq_restore(flags);
323
324 if (preempt_count() == cnt) {
325#ifdef CONFIG_DEBUG_PREEMPT
326 current->preempt_disable_ip = get_lock_parent_ip();
327#endif
328 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
329 }
330}
331EXPORT_SYMBOL(__local_bh_disable_ip);
332#endif /* CONFIG_TRACE_IRQFLAGS */
333
334static void __local_bh_enable(unsigned int cnt)
335{
336 lockdep_assert_irqs_disabled();
337
338 if (preempt_count() == cnt)
339 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
340
341 if (softirq_count() == (cnt & SOFTIRQ_MASK))
342 lockdep_softirqs_on(_RET_IP_);
343
344 __preempt_count_sub(cnt);
345}
346
347/*
348 * Special-case - softirqs can safely be enabled by __do_softirq(),
349 * without processing still-pending softirqs:
350 */
351void _local_bh_enable(void)
352{
353 WARN_ON_ONCE(in_hardirq());
354 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
355}
356EXPORT_SYMBOL(_local_bh_enable);
357
358void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
359{
360 WARN_ON_ONCE(in_hardirq());
361 lockdep_assert_irqs_enabled();
362#ifdef CONFIG_TRACE_IRQFLAGS
363 local_irq_disable();
364#endif
365 /*
366 * Are softirqs going to be turned on now:
367 */
368 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
369 lockdep_softirqs_on(ip);
370 /*
371 * Keep preemption disabled until we are done with
372 * softirq processing:
373 */
374 __preempt_count_sub(cnt - 1);
375
376 if (unlikely(!in_interrupt() && local_softirq_pending())) {
377 /*
378 * Run softirq if any pending. And do it in its own stack
379 * as we may be calling this deep in a task call stack already.
380 */
381 do_softirq();
382 }
383
384 preempt_count_dec();
385#ifdef CONFIG_TRACE_IRQFLAGS
386 local_irq_enable();
387#endif
388 preempt_check_resched();
389}
390EXPORT_SYMBOL(__local_bh_enable_ip);
391
392static inline void softirq_handle_begin(void)
393{
394 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
395}
396
397static inline void softirq_handle_end(void)
398{
399 __local_bh_enable(SOFTIRQ_OFFSET);
400 WARN_ON_ONCE(in_interrupt());
401}
402
403static inline void ksoftirqd_run_begin(void)
404{
405 local_irq_disable();
406}
407
408static inline void ksoftirqd_run_end(void)
409{
410 local_irq_enable();
411}
412
413static inline bool should_wake_ksoftirqd(void)
414{
415 return true;
416}
417
418static inline void invoke_softirq(void)
419{
420 if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
421#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
422 /*
423 * We can safely execute softirq on the current stack if
424 * it is the irq stack, because it should be near empty
425 * at this stage.
426 */
427 __do_softirq();
428#else
429 /*
430 * Otherwise, irq_exit() is called on the task stack that can
431 * be potentially deep already. So call softirq in its own stack
432 * to prevent from any overrun.
433 */
434 do_softirq_own_stack();
435#endif
436 } else {
437 wakeup_softirqd();
438 }
439}
440
441asmlinkage __visible void do_softirq(void)
442{
443 __u32 pending;
444 unsigned long flags;
445
446 if (in_interrupt())
447 return;
448
449 local_irq_save(flags);
450
451 pending = local_softirq_pending();
452
453 if (pending)
454 do_softirq_own_stack();
455
456 local_irq_restore(flags);
457}
458
459#endif /* !CONFIG_PREEMPT_RT */
460
461/*
462 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
463 * but break the loop if need_resched() is set or after 2 ms.
464 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
465 * certain cases, such as stop_machine(), jiffies may cease to
466 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
467 * well to make sure we eventually return from this method.
468 *
469 * These limits have been established via experimentation.
470 * The two things to balance is latency against fairness -
471 * we want to handle softirqs as soon as possible, but they
472 * should not be able to lock up the box.
473 */
474#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
475#define MAX_SOFTIRQ_RESTART 10
476
477#ifdef CONFIG_TRACE_IRQFLAGS
478/*
479 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
480 * to keep the lockdep irq context tracking as tight as possible in order to
481 * not miss-qualify lock contexts and miss possible deadlocks.
482 */
483
484static inline bool lockdep_softirq_start(void)
485{
486 bool in_hardirq = false;
487
488 if (lockdep_hardirq_context()) {
489 in_hardirq = true;
490 lockdep_hardirq_exit();
491 }
492
493 lockdep_softirq_enter();
494
495 return in_hardirq;
496}
497
498static inline void lockdep_softirq_end(bool in_hardirq)
499{
500 lockdep_softirq_exit();
501
502 if (in_hardirq)
503 lockdep_hardirq_enter();
504}
505#else
506static inline bool lockdep_softirq_start(void) { return false; }
507static inline void lockdep_softirq_end(bool in_hardirq) { }
508#endif
509
510asmlinkage __visible void __softirq_entry __do_softirq(void)
511{
512 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
513 unsigned long old_flags = current->flags;
514 int max_restart = MAX_SOFTIRQ_RESTART;
515 struct softirq_action *h;
516 bool in_hardirq;
517 __u32 pending;
518 int softirq_bit;
519
520 /*
521 * Mask out PF_MEMALLOC as the current task context is borrowed for the
522 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
523 * again if the socket is related to swapping.
524 */
525 current->flags &= ~PF_MEMALLOC;
526
527 pending = local_softirq_pending();
528
529 softirq_handle_begin();
530 in_hardirq = lockdep_softirq_start();
531 account_softirq_enter(current);
532
533restart:
534 /* Reset the pending bitmask before enabling irqs */
535 set_softirq_pending(0);
536
537 local_irq_enable();
538
539 h = softirq_vec;
540
541 while ((softirq_bit = ffs(pending))) {
542 unsigned int vec_nr;
543 int prev_count;
544
545 h += softirq_bit - 1;
546
547 vec_nr = h - softirq_vec;
548 prev_count = preempt_count();
549
550 kstat_incr_softirqs_this_cpu(vec_nr);
551
552 trace_softirq_entry(vec_nr);
553 h->action(h);
554 trace_softirq_exit(vec_nr);
555 if (unlikely(prev_count != preempt_count())) {
556 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
557 vec_nr, softirq_to_name[vec_nr], h->action,
558 prev_count, preempt_count());
559 preempt_count_set(prev_count);
560 }
561 h++;
562 pending >>= softirq_bit;
563 }
564
565 if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
566 __this_cpu_read(ksoftirqd) == current)
567 rcu_softirq_qs();
568
569 local_irq_disable();
570
571 pending = local_softirq_pending();
572 if (pending) {
573 if (time_before(jiffies, end) && !need_resched() &&
574 --max_restart)
575 goto restart;
576
577 wakeup_softirqd();
578 }
579
580 account_softirq_exit(current);
581 lockdep_softirq_end(in_hardirq);
582 softirq_handle_end();
583 current_restore_flags(old_flags, PF_MEMALLOC);
584}
585
586/**
587 * irq_enter_rcu - Enter an interrupt context with RCU watching
588 */
589void irq_enter_rcu(void)
590{
591 __irq_enter_raw();
592
593 if (tick_nohz_full_cpu(smp_processor_id()) ||
594 (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
595 tick_irq_enter();
596
597 account_hardirq_enter(current);
598}
599
600/**
601 * irq_enter - Enter an interrupt context including RCU update
602 */
603void irq_enter(void)
604{
605 ct_irq_enter();
606 irq_enter_rcu();
607}
608
609static inline void tick_irq_exit(void)
610{
611#ifdef CONFIG_NO_HZ_COMMON
612 int cpu = smp_processor_id();
613
614 /* Make sure that timer wheel updates are propagated */
615 if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
616 if (!in_hardirq())
617 tick_nohz_irq_exit();
618 }
619#endif
620}
621
622static inline void __irq_exit_rcu(void)
623{
624#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
625 local_irq_disable();
626#else
627 lockdep_assert_irqs_disabled();
628#endif
629 account_hardirq_exit(current);
630 preempt_count_sub(HARDIRQ_OFFSET);
631 if (!in_interrupt() && local_softirq_pending())
632 invoke_softirq();
633
634 tick_irq_exit();
635}
636
637/**
638 * irq_exit_rcu() - Exit an interrupt context without updating RCU
639 *
640 * Also processes softirqs if needed and possible.
641 */
642void irq_exit_rcu(void)
643{
644 __irq_exit_rcu();
645 /* must be last! */
646 lockdep_hardirq_exit();
647}
648
649/**
650 * irq_exit - Exit an interrupt context, update RCU and lockdep
651 *
652 * Also processes softirqs if needed and possible.
653 */
654void irq_exit(void)
655{
656 __irq_exit_rcu();
657 ct_irq_exit();
658 /* must be last! */
659 lockdep_hardirq_exit();
660}
661
662/*
663 * This function must run with irqs disabled!
664 */
665inline void raise_softirq_irqoff(unsigned int nr)
666{
667 __raise_softirq_irqoff(nr);
668
669 /*
670 * If we're in an interrupt or softirq, we're done
671 * (this also catches softirq-disabled code). We will
672 * actually run the softirq once we return from
673 * the irq or softirq.
674 *
675 * Otherwise we wake up ksoftirqd to make sure we
676 * schedule the softirq soon.
677 */
678 if (!in_interrupt() && should_wake_ksoftirqd())
679 wakeup_softirqd();
680}
681
682void raise_softirq(unsigned int nr)
683{
684 unsigned long flags;
685
686 local_irq_save(flags);
687 raise_softirq_irqoff(nr);
688 local_irq_restore(flags);
689}
690
691void __raise_softirq_irqoff(unsigned int nr)
692{
693 lockdep_assert_irqs_disabled();
694 trace_softirq_raise(nr);
695 or_softirq_pending(1UL << nr);
696}
697
698void open_softirq(int nr, void (*action)(struct softirq_action *))
699{
700 softirq_vec[nr].action = action;
701}
702
703/*
704 * Tasklets
705 */
706struct tasklet_head {
707 struct tasklet_struct *head;
708 struct tasklet_struct **tail;
709};
710
711static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
712static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
713
714static void __tasklet_schedule_common(struct tasklet_struct *t,
715 struct tasklet_head __percpu *headp,
716 unsigned int softirq_nr)
717{
718 struct tasklet_head *head;
719 unsigned long flags;
720
721 local_irq_save(flags);
722 head = this_cpu_ptr(headp);
723 t->next = NULL;
724 *head->tail = t;
725 head->tail = &(t->next);
726 raise_softirq_irqoff(softirq_nr);
727 local_irq_restore(flags);
728}
729
730void __tasklet_schedule(struct tasklet_struct *t)
731{
732 __tasklet_schedule_common(t, &tasklet_vec,
733 TASKLET_SOFTIRQ);
734}
735EXPORT_SYMBOL(__tasklet_schedule);
736
737void __tasklet_hi_schedule(struct tasklet_struct *t)
738{
739 __tasklet_schedule_common(t, &tasklet_hi_vec,
740 HI_SOFTIRQ);
741}
742EXPORT_SYMBOL(__tasklet_hi_schedule);
743
744static bool tasklet_clear_sched(struct tasklet_struct *t)
745{
746 if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
747 wake_up_var(&t->state);
748 return true;
749 }
750
751 WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
752 t->use_callback ? "callback" : "func",
753 t->use_callback ? (void *)t->callback : (void *)t->func);
754
755 return false;
756}
757
758static void tasklet_action_common(struct softirq_action *a,
759 struct tasklet_head *tl_head,
760 unsigned int softirq_nr)
761{
762 struct tasklet_struct *list;
763
764 local_irq_disable();
765 list = tl_head->head;
766 tl_head->head = NULL;
767 tl_head->tail = &tl_head->head;
768 local_irq_enable();
769
770 while (list) {
771 struct tasklet_struct *t = list;
772
773 list = list->next;
774
775 if (tasklet_trylock(t)) {
776 if (!atomic_read(&t->count)) {
777 if (tasklet_clear_sched(t)) {
778 if (t->use_callback) {
779 trace_tasklet_entry(t, t->callback);
780 t->callback(t);
781 trace_tasklet_exit(t, t->callback);
782 } else {
783 trace_tasklet_entry(t, t->func);
784 t->func(t->data);
785 trace_tasklet_exit(t, t->func);
786 }
787 }
788 tasklet_unlock(t);
789 continue;
790 }
791 tasklet_unlock(t);
792 }
793
794 local_irq_disable();
795 t->next = NULL;
796 *tl_head->tail = t;
797 tl_head->tail = &t->next;
798 __raise_softirq_irqoff(softirq_nr);
799 local_irq_enable();
800 }
801}
802
803static __latent_entropy void tasklet_action(struct softirq_action *a)
804{
805 tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
806}
807
808static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
809{
810 tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
811}
812
813void tasklet_setup(struct tasklet_struct *t,
814 void (*callback)(struct tasklet_struct *))
815{
816 t->next = NULL;
817 t->state = 0;
818 atomic_set(&t->count, 0);
819 t->callback = callback;
820 t->use_callback = true;
821 t->data = 0;
822}
823EXPORT_SYMBOL(tasklet_setup);
824
825void tasklet_init(struct tasklet_struct *t,
826 void (*func)(unsigned long), unsigned long data)
827{
828 t->next = NULL;
829 t->state = 0;
830 atomic_set(&t->count, 0);
831 t->func = func;
832 t->use_callback = false;
833 t->data = data;
834}
835EXPORT_SYMBOL(tasklet_init);
836
837#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
838/*
839 * Do not use in new code. Waiting for tasklets from atomic contexts is
840 * error prone and should be avoided.
841 */
842void tasklet_unlock_spin_wait(struct tasklet_struct *t)
843{
844 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
845 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
846 /*
847 * Prevent a live lock when current preempted soft
848 * interrupt processing or prevents ksoftirqd from
849 * running. If the tasklet runs on a different CPU
850 * then this has no effect other than doing the BH
851 * disable/enable dance for nothing.
852 */
853 local_bh_disable();
854 local_bh_enable();
855 } else {
856 cpu_relax();
857 }
858 }
859}
860EXPORT_SYMBOL(tasklet_unlock_spin_wait);
861#endif
862
863void tasklet_kill(struct tasklet_struct *t)
864{
865 if (in_interrupt())
866 pr_notice("Attempt to kill tasklet from interrupt\n");
867
868 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
869 wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
870
871 tasklet_unlock_wait(t);
872 tasklet_clear_sched(t);
873}
874EXPORT_SYMBOL(tasklet_kill);
875
876#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
877void tasklet_unlock(struct tasklet_struct *t)
878{
879 smp_mb__before_atomic();
880 clear_bit(TASKLET_STATE_RUN, &t->state);
881 smp_mb__after_atomic();
882 wake_up_var(&t->state);
883}
884EXPORT_SYMBOL_GPL(tasklet_unlock);
885
886void tasklet_unlock_wait(struct tasklet_struct *t)
887{
888 wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
889}
890EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
891#endif
892
893void __init softirq_init(void)
894{
895 int cpu;
896
897 for_each_possible_cpu(cpu) {
898 per_cpu(tasklet_vec, cpu).tail =
899 &per_cpu(tasklet_vec, cpu).head;
900 per_cpu(tasklet_hi_vec, cpu).tail =
901 &per_cpu(tasklet_hi_vec, cpu).head;
902 }
903
904 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
905 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
906}
907
908static int ksoftirqd_should_run(unsigned int cpu)
909{
910 return local_softirq_pending();
911}
912
913static void run_ksoftirqd(unsigned int cpu)
914{
915 ksoftirqd_run_begin();
916 if (local_softirq_pending()) {
917 /*
918 * We can safely run softirq on inline stack, as we are not deep
919 * in the task stack here.
920 */
921 __do_softirq();
922 ksoftirqd_run_end();
923 cond_resched();
924 return;
925 }
926 ksoftirqd_run_end();
927}
928
929#ifdef CONFIG_HOTPLUG_CPU
930static int takeover_tasklets(unsigned int cpu)
931{
932 /* CPU is dead, so no lock needed. */
933 local_irq_disable();
934
935 /* Find end, append list for that CPU. */
936 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
937 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
938 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
939 per_cpu(tasklet_vec, cpu).head = NULL;
940 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
941 }
942 raise_softirq_irqoff(TASKLET_SOFTIRQ);
943
944 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
945 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
946 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
947 per_cpu(tasklet_hi_vec, cpu).head = NULL;
948 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
949 }
950 raise_softirq_irqoff(HI_SOFTIRQ);
951
952 local_irq_enable();
953 return 0;
954}
955#else
956#define takeover_tasklets NULL
957#endif /* CONFIG_HOTPLUG_CPU */
958
959static struct smp_hotplug_thread softirq_threads = {
960 .store = &ksoftirqd,
961 .thread_should_run = ksoftirqd_should_run,
962 .thread_fn = run_ksoftirqd,
963 .thread_comm = "ksoftirqd/%u",
964};
965
966static __init int spawn_ksoftirqd(void)
967{
968 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
969 takeover_tasklets);
970 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
971
972 return 0;
973}
974early_initcall(spawn_ksoftirqd);
975
976/*
977 * [ These __weak aliases are kept in a separate compilation unit, so that
978 * GCC does not inline them incorrectly. ]
979 */
980
981int __init __weak early_irq_init(void)
982{
983 return 0;
984}
985
986int __init __weak arch_probe_nr_irqs(void)
987{
988 return NR_IRQS_LEGACY;
989}
990
991int __init __weak arch_early_irq_init(void)
992{
993 return 0;
994}
995
996unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
997{
998 return from;
999}