Loading...
1/*
2 * linux/kernel/softirq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 *
6 * Distribute under GPLv2.
7 *
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/export.h>
14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29
30#define CREATE_TRACE_POINTS
31#include <trace/events/irq.h>
32
33/*
34 - No shared variables, all the data are CPU local.
35 - If a softirq needs serialization, let it serialize itself
36 by its own spinlocks.
37 - Even if softirq is serialized, only local cpu is marked for
38 execution. Hence, we get something sort of weak cpu binding.
39 Though it is still not clear, will it result in better locality
40 or will not.
41
42 Examples:
43 - NET RX softirq. It is multithreaded and does not require
44 any global serialization.
45 - NET TX softirq. It kicks software netdevice queues, hence
46 it is logically serialized per device, but this serialization
47 is invisible to common code.
48 - Tasklets: serialized wrt itself.
49 */
50
51#ifndef __ARCH_IRQ_STAT
52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
53EXPORT_SYMBOL(irq_stat);
54#endif
55
56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
57
58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59
60const char * const softirq_to_name[NR_SOFTIRQS] = {
61 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62 "TASKLET", "SCHED", "HRTIMER", "RCU"
63};
64
65/*
66 * we cannot loop indefinitely here to avoid userspace starvation,
67 * but we also don't want to introduce a worst case 1/HZ latency
68 * to the pending events, so lets the scheduler to balance
69 * the softirq load for us.
70 */
71static void wakeup_softirqd(void)
72{
73 /* Interrupts are disabled: no need to stop preemption */
74 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
75
76 if (tsk && tsk->state != TASK_RUNNING)
77 wake_up_process(tsk);
78}
79
80/*
81 * preempt_count and SOFTIRQ_OFFSET usage:
82 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
83 * softirq processing.
84 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
85 * on local_bh_disable or local_bh_enable.
86 * This lets us distinguish between whether we are currently processing
87 * softirq and whether we just have bh disabled.
88 */
89
90/*
91 * This one is for softirq.c-internal use,
92 * where hardirqs are disabled legitimately:
93 */
94#ifdef CONFIG_TRACE_IRQFLAGS
95void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
96{
97 unsigned long flags;
98
99 WARN_ON_ONCE(in_irq());
100
101 raw_local_irq_save(flags);
102 /*
103 * The preempt tracer hooks into preempt_count_add and will break
104 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
105 * is set and before current->softirq_enabled is cleared.
106 * We must manually increment preempt_count here and manually
107 * call the trace_preempt_off later.
108 */
109 __preempt_count_add(cnt);
110 /*
111 * Were softirqs turned off above:
112 */
113 if (softirq_count() == (cnt & SOFTIRQ_MASK))
114 trace_softirqs_off(ip);
115 raw_local_irq_restore(flags);
116
117 if (preempt_count() == cnt) {
118#ifdef CONFIG_DEBUG_PREEMPT
119 current->preempt_disable_ip = get_lock_parent_ip();
120#endif
121 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
122 }
123}
124EXPORT_SYMBOL(__local_bh_disable_ip);
125#endif /* CONFIG_TRACE_IRQFLAGS */
126
127static void __local_bh_enable(unsigned int cnt)
128{
129 WARN_ON_ONCE(!irqs_disabled());
130
131 if (softirq_count() == (cnt & SOFTIRQ_MASK))
132 trace_softirqs_on(_RET_IP_);
133 preempt_count_sub(cnt);
134}
135
136/*
137 * Special-case - softirqs can safely be enabled in
138 * cond_resched_softirq(), or by __do_softirq(),
139 * without processing still-pending softirqs:
140 */
141void _local_bh_enable(void)
142{
143 WARN_ON_ONCE(in_irq());
144 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
145}
146EXPORT_SYMBOL(_local_bh_enable);
147
148void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
149{
150 WARN_ON_ONCE(in_irq() || irqs_disabled());
151#ifdef CONFIG_TRACE_IRQFLAGS
152 local_irq_disable();
153#endif
154 /*
155 * Are softirqs going to be turned on now:
156 */
157 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
158 trace_softirqs_on(ip);
159 /*
160 * Keep preemption disabled until we are done with
161 * softirq processing:
162 */
163 preempt_count_sub(cnt - 1);
164
165 if (unlikely(!in_interrupt() && local_softirq_pending())) {
166 /*
167 * Run softirq if any pending. And do it in its own stack
168 * as we may be calling this deep in a task call stack already.
169 */
170 do_softirq();
171 }
172
173 preempt_count_dec();
174#ifdef CONFIG_TRACE_IRQFLAGS
175 local_irq_enable();
176#endif
177 preempt_check_resched();
178}
179EXPORT_SYMBOL(__local_bh_enable_ip);
180
181/*
182 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
183 * but break the loop if need_resched() is set or after 2 ms.
184 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
185 * certain cases, such as stop_machine(), jiffies may cease to
186 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
187 * well to make sure we eventually return from this method.
188 *
189 * These limits have been established via experimentation.
190 * The two things to balance is latency against fairness -
191 * we want to handle softirqs as soon as possible, but they
192 * should not be able to lock up the box.
193 */
194#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
195#define MAX_SOFTIRQ_RESTART 10
196
197#ifdef CONFIG_TRACE_IRQFLAGS
198/*
199 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
200 * to keep the lockdep irq context tracking as tight as possible in order to
201 * not miss-qualify lock contexts and miss possible deadlocks.
202 */
203
204static inline bool lockdep_softirq_start(void)
205{
206 bool in_hardirq = false;
207
208 if (trace_hardirq_context(current)) {
209 in_hardirq = true;
210 trace_hardirq_exit();
211 }
212
213 lockdep_softirq_enter();
214
215 return in_hardirq;
216}
217
218static inline void lockdep_softirq_end(bool in_hardirq)
219{
220 lockdep_softirq_exit();
221
222 if (in_hardirq)
223 trace_hardirq_enter();
224}
225#else
226static inline bool lockdep_softirq_start(void) { return false; }
227static inline void lockdep_softirq_end(bool in_hardirq) { }
228#endif
229
230asmlinkage __visible void __softirq_entry __do_softirq(void)
231{
232 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
233 unsigned long old_flags = current->flags;
234 int max_restart = MAX_SOFTIRQ_RESTART;
235 struct softirq_action *h;
236 bool in_hardirq;
237 __u32 pending;
238 int softirq_bit;
239
240 /*
241 * Mask out PF_MEMALLOC s current task context is borrowed for the
242 * softirq. A softirq handled such as network RX might set PF_MEMALLOC
243 * again if the socket is related to swap
244 */
245 current->flags &= ~PF_MEMALLOC;
246
247 pending = local_softirq_pending();
248 account_irq_enter_time(current);
249
250 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
251 in_hardirq = lockdep_softirq_start();
252
253restart:
254 /* Reset the pending bitmask before enabling irqs */
255 set_softirq_pending(0);
256
257 local_irq_enable();
258
259 h = softirq_vec;
260
261 while ((softirq_bit = ffs(pending))) {
262 unsigned int vec_nr;
263 int prev_count;
264
265 h += softirq_bit - 1;
266
267 vec_nr = h - softirq_vec;
268 prev_count = preempt_count();
269
270 kstat_incr_softirqs_this_cpu(vec_nr);
271
272 trace_softirq_entry(vec_nr);
273 h->action(h);
274 trace_softirq_exit(vec_nr);
275 if (unlikely(prev_count != preempt_count())) {
276 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
277 vec_nr, softirq_to_name[vec_nr], h->action,
278 prev_count, preempt_count());
279 preempt_count_set(prev_count);
280 }
281 h++;
282 pending >>= softirq_bit;
283 }
284
285 rcu_bh_qs();
286 local_irq_disable();
287
288 pending = local_softirq_pending();
289 if (pending) {
290 if (time_before(jiffies, end) && !need_resched() &&
291 --max_restart)
292 goto restart;
293
294 wakeup_softirqd();
295 }
296
297 lockdep_softirq_end(in_hardirq);
298 account_irq_exit_time(current);
299 __local_bh_enable(SOFTIRQ_OFFSET);
300 WARN_ON_ONCE(in_interrupt());
301 tsk_restore_flags(current, old_flags, PF_MEMALLOC);
302}
303
304asmlinkage __visible void do_softirq(void)
305{
306 __u32 pending;
307 unsigned long flags;
308
309 if (in_interrupt())
310 return;
311
312 local_irq_save(flags);
313
314 pending = local_softirq_pending();
315
316 if (pending)
317 do_softirq_own_stack();
318
319 local_irq_restore(flags);
320}
321
322/*
323 * Enter an interrupt context.
324 */
325void irq_enter(void)
326{
327 rcu_irq_enter();
328 if (is_idle_task(current) && !in_interrupt()) {
329 /*
330 * Prevent raise_softirq from needlessly waking up ksoftirqd
331 * here, as softirq will be serviced on return from interrupt.
332 */
333 local_bh_disable();
334 tick_irq_enter();
335 _local_bh_enable();
336 }
337
338 __irq_enter();
339}
340
341static inline void invoke_softirq(void)
342{
343 if (!force_irqthreads) {
344#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
345 /*
346 * We can safely execute softirq on the current stack if
347 * it is the irq stack, because it should be near empty
348 * at this stage.
349 */
350 __do_softirq();
351#else
352 /*
353 * Otherwise, irq_exit() is called on the task stack that can
354 * be potentially deep already. So call softirq in its own stack
355 * to prevent from any overrun.
356 */
357 do_softirq_own_stack();
358#endif
359 } else {
360 wakeup_softirqd();
361 }
362}
363
364static inline void tick_irq_exit(void)
365{
366#ifdef CONFIG_NO_HZ_COMMON
367 int cpu = smp_processor_id();
368
369 /* Make sure that timer wheel updates are propagated */
370 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
371 if (!in_interrupt())
372 tick_nohz_irq_exit();
373 }
374#endif
375}
376
377/*
378 * Exit an interrupt context. Process softirqs if needed and possible:
379 */
380void irq_exit(void)
381{
382#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
383 local_irq_disable();
384#else
385 WARN_ON_ONCE(!irqs_disabled());
386#endif
387
388 account_irq_exit_time(current);
389 preempt_count_sub(HARDIRQ_OFFSET);
390 if (!in_interrupt() && local_softirq_pending())
391 invoke_softirq();
392
393 tick_irq_exit();
394 rcu_irq_exit();
395 trace_hardirq_exit(); /* must be last! */
396}
397
398/*
399 * This function must run with irqs disabled!
400 */
401inline void raise_softirq_irqoff(unsigned int nr)
402{
403 __raise_softirq_irqoff(nr);
404
405 /*
406 * If we're in an interrupt or softirq, we're done
407 * (this also catches softirq-disabled code). We will
408 * actually run the softirq once we return from
409 * the irq or softirq.
410 *
411 * Otherwise we wake up ksoftirqd to make sure we
412 * schedule the softirq soon.
413 */
414 if (!in_interrupt())
415 wakeup_softirqd();
416}
417
418void raise_softirq(unsigned int nr)
419{
420 unsigned long flags;
421
422 local_irq_save(flags);
423 raise_softirq_irqoff(nr);
424 local_irq_restore(flags);
425}
426
427void __raise_softirq_irqoff(unsigned int nr)
428{
429 trace_softirq_raise(nr);
430 or_softirq_pending(1UL << nr);
431}
432
433void open_softirq(int nr, void (*action)(struct softirq_action *))
434{
435 softirq_vec[nr].action = action;
436}
437
438/*
439 * Tasklets
440 */
441struct tasklet_head {
442 struct tasklet_struct *head;
443 struct tasklet_struct **tail;
444};
445
446static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
447static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
448
449void __tasklet_schedule(struct tasklet_struct *t)
450{
451 unsigned long flags;
452
453 local_irq_save(flags);
454 t->next = NULL;
455 *__this_cpu_read(tasklet_vec.tail) = t;
456 __this_cpu_write(tasklet_vec.tail, &(t->next));
457 raise_softirq_irqoff(TASKLET_SOFTIRQ);
458 local_irq_restore(flags);
459}
460EXPORT_SYMBOL(__tasklet_schedule);
461
462void __tasklet_hi_schedule(struct tasklet_struct *t)
463{
464 unsigned long flags;
465
466 local_irq_save(flags);
467 t->next = NULL;
468 *__this_cpu_read(tasklet_hi_vec.tail) = t;
469 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
470 raise_softirq_irqoff(HI_SOFTIRQ);
471 local_irq_restore(flags);
472}
473EXPORT_SYMBOL(__tasklet_hi_schedule);
474
475void __tasklet_hi_schedule_first(struct tasklet_struct *t)
476{
477 BUG_ON(!irqs_disabled());
478
479 t->next = __this_cpu_read(tasklet_hi_vec.head);
480 __this_cpu_write(tasklet_hi_vec.head, t);
481 __raise_softirq_irqoff(HI_SOFTIRQ);
482}
483EXPORT_SYMBOL(__tasklet_hi_schedule_first);
484
485static void tasklet_action(struct softirq_action *a)
486{
487 struct tasklet_struct *list;
488
489 local_irq_disable();
490 list = __this_cpu_read(tasklet_vec.head);
491 __this_cpu_write(tasklet_vec.head, NULL);
492 __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
493 local_irq_enable();
494
495 while (list) {
496 struct tasklet_struct *t = list;
497
498 list = list->next;
499
500 if (tasklet_trylock(t)) {
501 if (!atomic_read(&t->count)) {
502 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
503 &t->state))
504 BUG();
505 t->func(t->data);
506 tasklet_unlock(t);
507 continue;
508 }
509 tasklet_unlock(t);
510 }
511
512 local_irq_disable();
513 t->next = NULL;
514 *__this_cpu_read(tasklet_vec.tail) = t;
515 __this_cpu_write(tasklet_vec.tail, &(t->next));
516 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
517 local_irq_enable();
518 }
519}
520
521static void tasklet_hi_action(struct softirq_action *a)
522{
523 struct tasklet_struct *list;
524
525 local_irq_disable();
526 list = __this_cpu_read(tasklet_hi_vec.head);
527 __this_cpu_write(tasklet_hi_vec.head, NULL);
528 __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
529 local_irq_enable();
530
531 while (list) {
532 struct tasklet_struct *t = list;
533
534 list = list->next;
535
536 if (tasklet_trylock(t)) {
537 if (!atomic_read(&t->count)) {
538 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
539 &t->state))
540 BUG();
541 t->func(t->data);
542 tasklet_unlock(t);
543 continue;
544 }
545 tasklet_unlock(t);
546 }
547
548 local_irq_disable();
549 t->next = NULL;
550 *__this_cpu_read(tasklet_hi_vec.tail) = t;
551 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
552 __raise_softirq_irqoff(HI_SOFTIRQ);
553 local_irq_enable();
554 }
555}
556
557void tasklet_init(struct tasklet_struct *t,
558 void (*func)(unsigned long), unsigned long data)
559{
560 t->next = NULL;
561 t->state = 0;
562 atomic_set(&t->count, 0);
563 t->func = func;
564 t->data = data;
565}
566EXPORT_SYMBOL(tasklet_init);
567
568void tasklet_kill(struct tasklet_struct *t)
569{
570 if (in_interrupt())
571 pr_notice("Attempt to kill tasklet from interrupt\n");
572
573 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
574 do {
575 yield();
576 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
577 }
578 tasklet_unlock_wait(t);
579 clear_bit(TASKLET_STATE_SCHED, &t->state);
580}
581EXPORT_SYMBOL(tasklet_kill);
582
583/*
584 * tasklet_hrtimer
585 */
586
587/*
588 * The trampoline is called when the hrtimer expires. It schedules a tasklet
589 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
590 * hrtimer callback, but from softirq context.
591 */
592static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
593{
594 struct tasklet_hrtimer *ttimer =
595 container_of(timer, struct tasklet_hrtimer, timer);
596
597 tasklet_hi_schedule(&ttimer->tasklet);
598 return HRTIMER_NORESTART;
599}
600
601/*
602 * Helper function which calls the hrtimer callback from
603 * tasklet/softirq context
604 */
605static void __tasklet_hrtimer_trampoline(unsigned long data)
606{
607 struct tasklet_hrtimer *ttimer = (void *)data;
608 enum hrtimer_restart restart;
609
610 restart = ttimer->function(&ttimer->timer);
611 if (restart != HRTIMER_NORESTART)
612 hrtimer_restart(&ttimer->timer);
613}
614
615/**
616 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
617 * @ttimer: tasklet_hrtimer which is initialized
618 * @function: hrtimer callback function which gets called from softirq context
619 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
620 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
621 */
622void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
623 enum hrtimer_restart (*function)(struct hrtimer *),
624 clockid_t which_clock, enum hrtimer_mode mode)
625{
626 hrtimer_init(&ttimer->timer, which_clock, mode);
627 ttimer->timer.function = __hrtimer_tasklet_trampoline;
628 tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
629 (unsigned long)ttimer);
630 ttimer->function = function;
631}
632EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
633
634void __init softirq_init(void)
635{
636 int cpu;
637
638 for_each_possible_cpu(cpu) {
639 per_cpu(tasklet_vec, cpu).tail =
640 &per_cpu(tasklet_vec, cpu).head;
641 per_cpu(tasklet_hi_vec, cpu).tail =
642 &per_cpu(tasklet_hi_vec, cpu).head;
643 }
644
645 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
646 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
647}
648
649static int ksoftirqd_should_run(unsigned int cpu)
650{
651 return local_softirq_pending();
652}
653
654static void run_ksoftirqd(unsigned int cpu)
655{
656 local_irq_disable();
657 if (local_softirq_pending()) {
658 /*
659 * We can safely run softirq on inline stack, as we are not deep
660 * in the task stack here.
661 */
662 __do_softirq();
663 local_irq_enable();
664 cond_resched_rcu_qs();
665 return;
666 }
667 local_irq_enable();
668}
669
670#ifdef CONFIG_HOTPLUG_CPU
671/*
672 * tasklet_kill_immediate is called to remove a tasklet which can already be
673 * scheduled for execution on @cpu.
674 *
675 * Unlike tasklet_kill, this function removes the tasklet
676 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
677 *
678 * When this function is called, @cpu must be in the CPU_DEAD state.
679 */
680void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
681{
682 struct tasklet_struct **i;
683
684 BUG_ON(cpu_online(cpu));
685 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
686
687 if (!test_bit(TASKLET_STATE_SCHED, &t->state))
688 return;
689
690 /* CPU is dead, so no lock needed. */
691 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
692 if (*i == t) {
693 *i = t->next;
694 /* If this was the tail element, move the tail ptr */
695 if (*i == NULL)
696 per_cpu(tasklet_vec, cpu).tail = i;
697 return;
698 }
699 }
700 BUG();
701}
702
703static void takeover_tasklets(unsigned int cpu)
704{
705 /* CPU is dead, so no lock needed. */
706 local_irq_disable();
707
708 /* Find end, append list for that CPU. */
709 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
711 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
712 per_cpu(tasklet_vec, cpu).head = NULL;
713 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
714 }
715 raise_softirq_irqoff(TASKLET_SOFTIRQ);
716
717 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
718 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
719 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
720 per_cpu(tasklet_hi_vec, cpu).head = NULL;
721 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
722 }
723 raise_softirq_irqoff(HI_SOFTIRQ);
724
725 local_irq_enable();
726}
727#endif /* CONFIG_HOTPLUG_CPU */
728
729static int cpu_callback(struct notifier_block *nfb, unsigned long action,
730 void *hcpu)
731{
732 switch (action) {
733#ifdef CONFIG_HOTPLUG_CPU
734 case CPU_DEAD:
735 case CPU_DEAD_FROZEN:
736 takeover_tasklets((unsigned long)hcpu);
737 break;
738#endif /* CONFIG_HOTPLUG_CPU */
739 }
740 return NOTIFY_OK;
741}
742
743static struct notifier_block cpu_nfb = {
744 .notifier_call = cpu_callback
745};
746
747static struct smp_hotplug_thread softirq_threads = {
748 .store = &ksoftirqd,
749 .thread_should_run = ksoftirqd_should_run,
750 .thread_fn = run_ksoftirqd,
751 .thread_comm = "ksoftirqd/%u",
752};
753
754static __init int spawn_ksoftirqd(void)
755{
756 register_cpu_notifier(&cpu_nfb);
757
758 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
759
760 return 0;
761}
762early_initcall(spawn_ksoftirqd);
763
764/*
765 * [ These __weak aliases are kept in a separate compilation unit, so that
766 * GCC does not inline them incorrectly. ]
767 */
768
769int __init __weak early_irq_init(void)
770{
771 return 0;
772}
773
774int __init __weak arch_probe_nr_irqs(void)
775{
776 return NR_IRQS_LEGACY;
777}
778
779int __init __weak arch_early_irq_init(void)
780{
781 return 0;
782}
783
784unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
785{
786 return from;
787}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/softirq.c
4 *
5 * Copyright (C) 1992 Linus Torvalds
6 *
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/export.h>
13#include <linux/kernel_stat.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/local_lock.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29#include <linux/wait_bit.h>
30#include <linux/workqueue.h>
31
32#include <asm/softirq_stack.h>
33
34#define CREATE_TRACE_POINTS
35#include <trace/events/irq.h>
36
37/*
38 - No shared variables, all the data are CPU local.
39 - If a softirq needs serialization, let it serialize itself
40 by its own spinlocks.
41 - Even if softirq is serialized, only local cpu is marked for
42 execution. Hence, we get something sort of weak cpu binding.
43 Though it is still not clear, will it result in better locality
44 or will not.
45
46 Examples:
47 - NET RX softirq. It is multithreaded and does not require
48 any global serialization.
49 - NET TX softirq. It kicks software netdevice queues, hence
50 it is logically serialized per device, but this serialization
51 is invisible to common code.
52 - Tasklets: serialized wrt itself.
53 */
54
55#ifndef __ARCH_IRQ_STAT
56DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
57EXPORT_PER_CPU_SYMBOL(irq_stat);
58#endif
59
60static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
61
62DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
63
64const char * const softirq_to_name[NR_SOFTIRQS] = {
65 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
66 "TASKLET", "SCHED", "HRTIMER", "RCU"
67};
68
69/*
70 * we cannot loop indefinitely here to avoid userspace starvation,
71 * but we also don't want to introduce a worst case 1/HZ latency
72 * to the pending events, so lets the scheduler to balance
73 * the softirq load for us.
74 */
75static void wakeup_softirqd(void)
76{
77 /* Interrupts are disabled: no need to stop preemption */
78 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
79
80 if (tsk)
81 wake_up_process(tsk);
82}
83
84#ifdef CONFIG_TRACE_IRQFLAGS
85DEFINE_PER_CPU(int, hardirqs_enabled);
86DEFINE_PER_CPU(int, hardirq_context);
87EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
88EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
89#endif
90
91/*
92 * SOFTIRQ_OFFSET usage:
93 *
94 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
95 * to a per CPU counter and to task::softirqs_disabled_cnt.
96 *
97 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
98 * processing.
99 *
100 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
101 * on local_bh_disable or local_bh_enable.
102 *
103 * This lets us distinguish between whether we are currently processing
104 * softirq and whether we just have bh disabled.
105 */
106#ifdef CONFIG_PREEMPT_RT
107
108/*
109 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
110 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
111 * softirq disabled section to be preempted.
112 *
113 * The per task counter is used for softirq_count(), in_softirq() and
114 * in_serving_softirqs() because these counts are only valid when the task
115 * holding softirq_ctrl::lock is running.
116 *
117 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
118 * the task which is in a softirq disabled section is preempted or blocks.
119 */
120struct softirq_ctrl {
121 local_lock_t lock;
122 int cnt;
123};
124
125static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
126 .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
127};
128
129/**
130 * local_bh_blocked() - Check for idle whether BH processing is blocked
131 *
132 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
133 *
134 * This is invoked from the idle task to guard against false positive
135 * softirq pending warnings, which would happen when the task which holds
136 * softirq_ctrl::lock was the only running task on the CPU and blocks on
137 * some other lock.
138 */
139bool local_bh_blocked(void)
140{
141 return __this_cpu_read(softirq_ctrl.cnt) != 0;
142}
143
144void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
145{
146 unsigned long flags;
147 int newcnt;
148
149 WARN_ON_ONCE(in_hardirq());
150
151 /* First entry of a task into a BH disabled section? */
152 if (!current->softirq_disable_cnt) {
153 if (preemptible()) {
154 local_lock(&softirq_ctrl.lock);
155 /* Required to meet the RCU bottomhalf requirements. */
156 rcu_read_lock();
157 } else {
158 DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
159 }
160 }
161
162 /*
163 * Track the per CPU softirq disabled state. On RT this is per CPU
164 * state to allow preemption of bottom half disabled sections.
165 */
166 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
167 /*
168 * Reflect the result in the task state to prevent recursion on the
169 * local lock and to make softirq_count() & al work.
170 */
171 current->softirq_disable_cnt = newcnt;
172
173 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
174 raw_local_irq_save(flags);
175 lockdep_softirqs_off(ip);
176 raw_local_irq_restore(flags);
177 }
178}
179EXPORT_SYMBOL(__local_bh_disable_ip);
180
181static void __local_bh_enable(unsigned int cnt, bool unlock)
182{
183 unsigned long flags;
184 int newcnt;
185
186 DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
187 this_cpu_read(softirq_ctrl.cnt));
188
189 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
190 raw_local_irq_save(flags);
191 lockdep_softirqs_on(_RET_IP_);
192 raw_local_irq_restore(flags);
193 }
194
195 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
196 current->softirq_disable_cnt = newcnt;
197
198 if (!newcnt && unlock) {
199 rcu_read_unlock();
200 local_unlock(&softirq_ctrl.lock);
201 }
202}
203
204void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
205{
206 bool preempt_on = preemptible();
207 unsigned long flags;
208 u32 pending;
209 int curcnt;
210
211 WARN_ON_ONCE(in_hardirq());
212 lockdep_assert_irqs_enabled();
213
214 local_irq_save(flags);
215 curcnt = __this_cpu_read(softirq_ctrl.cnt);
216
217 /*
218 * If this is not reenabling soft interrupts, no point in trying to
219 * run pending ones.
220 */
221 if (curcnt != cnt)
222 goto out;
223
224 pending = local_softirq_pending();
225 if (!pending)
226 goto out;
227
228 /*
229 * If this was called from non preemptible context, wake up the
230 * softirq daemon.
231 */
232 if (!preempt_on) {
233 wakeup_softirqd();
234 goto out;
235 }
236
237 /*
238 * Adjust softirq count to SOFTIRQ_OFFSET which makes
239 * in_serving_softirq() become true.
240 */
241 cnt = SOFTIRQ_OFFSET;
242 __local_bh_enable(cnt, false);
243 __do_softirq();
244
245out:
246 __local_bh_enable(cnt, preempt_on);
247 local_irq_restore(flags);
248}
249EXPORT_SYMBOL(__local_bh_enable_ip);
250
251/*
252 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
253 * to acquire the per CPU local lock for reentrancy protection.
254 */
255static inline void ksoftirqd_run_begin(void)
256{
257 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
258 local_irq_disable();
259}
260
261/* Counterpart to ksoftirqd_run_begin() */
262static inline void ksoftirqd_run_end(void)
263{
264 __local_bh_enable(SOFTIRQ_OFFSET, true);
265 WARN_ON_ONCE(in_interrupt());
266 local_irq_enable();
267}
268
269static inline void softirq_handle_begin(void) { }
270static inline void softirq_handle_end(void) { }
271
272static inline bool should_wake_ksoftirqd(void)
273{
274 return !this_cpu_read(softirq_ctrl.cnt);
275}
276
277static inline void invoke_softirq(void)
278{
279 if (should_wake_ksoftirqd())
280 wakeup_softirqd();
281}
282
283/*
284 * flush_smp_call_function_queue() can raise a soft interrupt in a function
285 * call. On RT kernels this is undesired and the only known functionality
286 * in the block layer which does this is disabled on RT. If soft interrupts
287 * get raised which haven't been raised before the flush, warn so it can be
288 * investigated.
289 */
290void do_softirq_post_smp_call_flush(unsigned int was_pending)
291{
292 if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
293 invoke_softirq();
294}
295
296#else /* CONFIG_PREEMPT_RT */
297
298/*
299 * This one is for softirq.c-internal use, where hardirqs are disabled
300 * legitimately:
301 */
302#ifdef CONFIG_TRACE_IRQFLAGS
303void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
304{
305 unsigned long flags;
306
307 WARN_ON_ONCE(in_hardirq());
308
309 raw_local_irq_save(flags);
310 /*
311 * The preempt tracer hooks into preempt_count_add and will break
312 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
313 * is set and before current->softirq_enabled is cleared.
314 * We must manually increment preempt_count here and manually
315 * call the trace_preempt_off later.
316 */
317 __preempt_count_add(cnt);
318 /*
319 * Were softirqs turned off above:
320 */
321 if (softirq_count() == (cnt & SOFTIRQ_MASK))
322 lockdep_softirqs_off(ip);
323 raw_local_irq_restore(flags);
324
325 if (preempt_count() == cnt) {
326#ifdef CONFIG_DEBUG_PREEMPT
327 current->preempt_disable_ip = get_lock_parent_ip();
328#endif
329 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
330 }
331}
332EXPORT_SYMBOL(__local_bh_disable_ip);
333#endif /* CONFIG_TRACE_IRQFLAGS */
334
335static void __local_bh_enable(unsigned int cnt)
336{
337 lockdep_assert_irqs_disabled();
338
339 if (preempt_count() == cnt)
340 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
341
342 if (softirq_count() == (cnt & SOFTIRQ_MASK))
343 lockdep_softirqs_on(_RET_IP_);
344
345 __preempt_count_sub(cnt);
346}
347
348/*
349 * Special-case - softirqs can safely be enabled by __do_softirq(),
350 * without processing still-pending softirqs:
351 */
352void _local_bh_enable(void)
353{
354 WARN_ON_ONCE(in_hardirq());
355 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
356}
357EXPORT_SYMBOL(_local_bh_enable);
358
359void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
360{
361 WARN_ON_ONCE(in_hardirq());
362 lockdep_assert_irqs_enabled();
363#ifdef CONFIG_TRACE_IRQFLAGS
364 local_irq_disable();
365#endif
366 /*
367 * Are softirqs going to be turned on now:
368 */
369 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
370 lockdep_softirqs_on(ip);
371 /*
372 * Keep preemption disabled until we are done with
373 * softirq processing:
374 */
375 __preempt_count_sub(cnt - 1);
376
377 if (unlikely(!in_interrupt() && local_softirq_pending())) {
378 /*
379 * Run softirq if any pending. And do it in its own stack
380 * as we may be calling this deep in a task call stack already.
381 */
382 do_softirq();
383 }
384
385 preempt_count_dec();
386#ifdef CONFIG_TRACE_IRQFLAGS
387 local_irq_enable();
388#endif
389 preempt_check_resched();
390}
391EXPORT_SYMBOL(__local_bh_enable_ip);
392
393static inline void softirq_handle_begin(void)
394{
395 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
396}
397
398static inline void softirq_handle_end(void)
399{
400 __local_bh_enable(SOFTIRQ_OFFSET);
401 WARN_ON_ONCE(in_interrupt());
402}
403
404static inline void ksoftirqd_run_begin(void)
405{
406 local_irq_disable();
407}
408
409static inline void ksoftirqd_run_end(void)
410{
411 local_irq_enable();
412}
413
414static inline bool should_wake_ksoftirqd(void)
415{
416 return true;
417}
418
419static inline void invoke_softirq(void)
420{
421 if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
422#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
423 /*
424 * We can safely execute softirq on the current stack if
425 * it is the irq stack, because it should be near empty
426 * at this stage.
427 */
428 __do_softirq();
429#else
430 /*
431 * Otherwise, irq_exit() is called on the task stack that can
432 * be potentially deep already. So call softirq in its own stack
433 * to prevent from any overrun.
434 */
435 do_softirq_own_stack();
436#endif
437 } else {
438 wakeup_softirqd();
439 }
440}
441
442asmlinkage __visible void do_softirq(void)
443{
444 __u32 pending;
445 unsigned long flags;
446
447 if (in_interrupt())
448 return;
449
450 local_irq_save(flags);
451
452 pending = local_softirq_pending();
453
454 if (pending)
455 do_softirq_own_stack();
456
457 local_irq_restore(flags);
458}
459
460#endif /* !CONFIG_PREEMPT_RT */
461
462/*
463 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
464 * but break the loop if need_resched() is set or after 2 ms.
465 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
466 * certain cases, such as stop_machine(), jiffies may cease to
467 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
468 * well to make sure we eventually return from this method.
469 *
470 * These limits have been established via experimentation.
471 * The two things to balance is latency against fairness -
472 * we want to handle softirqs as soon as possible, but they
473 * should not be able to lock up the box.
474 */
475#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
476#define MAX_SOFTIRQ_RESTART 10
477
478#ifdef CONFIG_TRACE_IRQFLAGS
479/*
480 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
481 * to keep the lockdep irq context tracking as tight as possible in order to
482 * not miss-qualify lock contexts and miss possible deadlocks.
483 */
484
485static inline bool lockdep_softirq_start(void)
486{
487 bool in_hardirq = false;
488
489 if (lockdep_hardirq_context()) {
490 in_hardirq = true;
491 lockdep_hardirq_exit();
492 }
493
494 lockdep_softirq_enter();
495
496 return in_hardirq;
497}
498
499static inline void lockdep_softirq_end(bool in_hardirq)
500{
501 lockdep_softirq_exit();
502
503 if (in_hardirq)
504 lockdep_hardirq_enter();
505}
506#else
507static inline bool lockdep_softirq_start(void) { return false; }
508static inline void lockdep_softirq_end(bool in_hardirq) { }
509#endif
510
511static void handle_softirqs(bool ksirqd)
512{
513 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
514 unsigned long old_flags = current->flags;
515 int max_restart = MAX_SOFTIRQ_RESTART;
516 struct softirq_action *h;
517 bool in_hardirq;
518 __u32 pending;
519 int softirq_bit;
520
521 /*
522 * Mask out PF_MEMALLOC as the current task context is borrowed for the
523 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
524 * again if the socket is related to swapping.
525 */
526 current->flags &= ~PF_MEMALLOC;
527
528 pending = local_softirq_pending();
529
530 softirq_handle_begin();
531 in_hardirq = lockdep_softirq_start();
532 account_softirq_enter(current);
533
534restart:
535 /* Reset the pending bitmask before enabling irqs */
536 set_softirq_pending(0);
537
538 local_irq_enable();
539
540 h = softirq_vec;
541
542 while ((softirq_bit = ffs(pending))) {
543 unsigned int vec_nr;
544 int prev_count;
545
546 h += softirq_bit - 1;
547
548 vec_nr = h - softirq_vec;
549 prev_count = preempt_count();
550
551 kstat_incr_softirqs_this_cpu(vec_nr);
552
553 trace_softirq_entry(vec_nr);
554 h->action(h);
555 trace_softirq_exit(vec_nr);
556 if (unlikely(prev_count != preempt_count())) {
557 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
558 vec_nr, softirq_to_name[vec_nr], h->action,
559 prev_count, preempt_count());
560 preempt_count_set(prev_count);
561 }
562 h++;
563 pending >>= softirq_bit;
564 }
565
566 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
567 rcu_softirq_qs();
568
569 local_irq_disable();
570
571 pending = local_softirq_pending();
572 if (pending) {
573 if (time_before(jiffies, end) && !need_resched() &&
574 --max_restart)
575 goto restart;
576
577 wakeup_softirqd();
578 }
579
580 account_softirq_exit(current);
581 lockdep_softirq_end(in_hardirq);
582 softirq_handle_end();
583 current_restore_flags(old_flags, PF_MEMALLOC);
584}
585
586asmlinkage __visible void __softirq_entry __do_softirq(void)
587{
588 handle_softirqs(false);
589}
590
591/**
592 * irq_enter_rcu - Enter an interrupt context with RCU watching
593 */
594void irq_enter_rcu(void)
595{
596 __irq_enter_raw();
597
598 if (tick_nohz_full_cpu(smp_processor_id()) ||
599 (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
600 tick_irq_enter();
601
602 account_hardirq_enter(current);
603}
604
605/**
606 * irq_enter - Enter an interrupt context including RCU update
607 */
608void irq_enter(void)
609{
610 ct_irq_enter();
611 irq_enter_rcu();
612}
613
614static inline void tick_irq_exit(void)
615{
616#ifdef CONFIG_NO_HZ_COMMON
617 int cpu = smp_processor_id();
618
619 /* Make sure that timer wheel updates are propagated */
620 if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
621 if (!in_hardirq())
622 tick_nohz_irq_exit();
623 }
624#endif
625}
626
627static inline void __irq_exit_rcu(void)
628{
629#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
630 local_irq_disable();
631#else
632 lockdep_assert_irqs_disabled();
633#endif
634 account_hardirq_exit(current);
635 preempt_count_sub(HARDIRQ_OFFSET);
636 if (!in_interrupt() && local_softirq_pending())
637 invoke_softirq();
638
639 tick_irq_exit();
640}
641
642/**
643 * irq_exit_rcu() - Exit an interrupt context without updating RCU
644 *
645 * Also processes softirqs if needed and possible.
646 */
647void irq_exit_rcu(void)
648{
649 __irq_exit_rcu();
650 /* must be last! */
651 lockdep_hardirq_exit();
652}
653
654/**
655 * irq_exit - Exit an interrupt context, update RCU and lockdep
656 *
657 * Also processes softirqs if needed and possible.
658 */
659void irq_exit(void)
660{
661 __irq_exit_rcu();
662 ct_irq_exit();
663 /* must be last! */
664 lockdep_hardirq_exit();
665}
666
667/*
668 * This function must run with irqs disabled!
669 */
670inline void raise_softirq_irqoff(unsigned int nr)
671{
672 __raise_softirq_irqoff(nr);
673
674 /*
675 * If we're in an interrupt or softirq, we're done
676 * (this also catches softirq-disabled code). We will
677 * actually run the softirq once we return from
678 * the irq or softirq.
679 *
680 * Otherwise we wake up ksoftirqd to make sure we
681 * schedule the softirq soon.
682 */
683 if (!in_interrupt() && should_wake_ksoftirqd())
684 wakeup_softirqd();
685}
686
687void raise_softirq(unsigned int nr)
688{
689 unsigned long flags;
690
691 local_irq_save(flags);
692 raise_softirq_irqoff(nr);
693 local_irq_restore(flags);
694}
695
696void __raise_softirq_irqoff(unsigned int nr)
697{
698 lockdep_assert_irqs_disabled();
699 trace_softirq_raise(nr);
700 or_softirq_pending(1UL << nr);
701}
702
703void open_softirq(int nr, void (*action)(struct softirq_action *))
704{
705 softirq_vec[nr].action = action;
706}
707
708/*
709 * Tasklets
710 */
711struct tasklet_head {
712 struct tasklet_struct *head;
713 struct tasklet_struct **tail;
714};
715
716static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
717static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
718
719static void __tasklet_schedule_common(struct tasklet_struct *t,
720 struct tasklet_head __percpu *headp,
721 unsigned int softirq_nr)
722{
723 struct tasklet_head *head;
724 unsigned long flags;
725
726 local_irq_save(flags);
727 head = this_cpu_ptr(headp);
728 t->next = NULL;
729 *head->tail = t;
730 head->tail = &(t->next);
731 raise_softirq_irqoff(softirq_nr);
732 local_irq_restore(flags);
733}
734
735void __tasklet_schedule(struct tasklet_struct *t)
736{
737 __tasklet_schedule_common(t, &tasklet_vec,
738 TASKLET_SOFTIRQ);
739}
740EXPORT_SYMBOL(__tasklet_schedule);
741
742void __tasklet_hi_schedule(struct tasklet_struct *t)
743{
744 __tasklet_schedule_common(t, &tasklet_hi_vec,
745 HI_SOFTIRQ);
746}
747EXPORT_SYMBOL(__tasklet_hi_schedule);
748
749static bool tasklet_clear_sched(struct tasklet_struct *t)
750{
751 if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
752 wake_up_var(&t->state);
753 return true;
754 }
755
756 WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
757 t->use_callback ? "callback" : "func",
758 t->use_callback ? (void *)t->callback : (void *)t->func);
759
760 return false;
761}
762
763static void tasklet_action_common(struct softirq_action *a,
764 struct tasklet_head *tl_head,
765 unsigned int softirq_nr)
766{
767 struct tasklet_struct *list;
768
769 local_irq_disable();
770 list = tl_head->head;
771 tl_head->head = NULL;
772 tl_head->tail = &tl_head->head;
773 local_irq_enable();
774
775 while (list) {
776 struct tasklet_struct *t = list;
777
778 list = list->next;
779
780 if (tasklet_trylock(t)) {
781 if (!atomic_read(&t->count)) {
782 if (tasklet_clear_sched(t)) {
783 if (t->use_callback) {
784 trace_tasklet_entry(t, t->callback);
785 t->callback(t);
786 trace_tasklet_exit(t, t->callback);
787 } else {
788 trace_tasklet_entry(t, t->func);
789 t->func(t->data);
790 trace_tasklet_exit(t, t->func);
791 }
792 }
793 tasklet_unlock(t);
794 continue;
795 }
796 tasklet_unlock(t);
797 }
798
799 local_irq_disable();
800 t->next = NULL;
801 *tl_head->tail = t;
802 tl_head->tail = &t->next;
803 __raise_softirq_irqoff(softirq_nr);
804 local_irq_enable();
805 }
806}
807
808static __latent_entropy void tasklet_action(struct softirq_action *a)
809{
810 workqueue_softirq_action(false);
811 tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
812}
813
814static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
815{
816 workqueue_softirq_action(true);
817 tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
818}
819
820void tasklet_setup(struct tasklet_struct *t,
821 void (*callback)(struct tasklet_struct *))
822{
823 t->next = NULL;
824 t->state = 0;
825 atomic_set(&t->count, 0);
826 t->callback = callback;
827 t->use_callback = true;
828 t->data = 0;
829}
830EXPORT_SYMBOL(tasklet_setup);
831
832void tasklet_init(struct tasklet_struct *t,
833 void (*func)(unsigned long), unsigned long data)
834{
835 t->next = NULL;
836 t->state = 0;
837 atomic_set(&t->count, 0);
838 t->func = func;
839 t->use_callback = false;
840 t->data = data;
841}
842EXPORT_SYMBOL(tasklet_init);
843
844#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
845/*
846 * Do not use in new code. Waiting for tasklets from atomic contexts is
847 * error prone and should be avoided.
848 */
849void tasklet_unlock_spin_wait(struct tasklet_struct *t)
850{
851 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
852 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
853 /*
854 * Prevent a live lock when current preempted soft
855 * interrupt processing or prevents ksoftirqd from
856 * running. If the tasklet runs on a different CPU
857 * then this has no effect other than doing the BH
858 * disable/enable dance for nothing.
859 */
860 local_bh_disable();
861 local_bh_enable();
862 } else {
863 cpu_relax();
864 }
865 }
866}
867EXPORT_SYMBOL(tasklet_unlock_spin_wait);
868#endif
869
870void tasklet_kill(struct tasklet_struct *t)
871{
872 if (in_interrupt())
873 pr_notice("Attempt to kill tasklet from interrupt\n");
874
875 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
876 wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
877
878 tasklet_unlock_wait(t);
879 tasklet_clear_sched(t);
880}
881EXPORT_SYMBOL(tasklet_kill);
882
883#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
884void tasklet_unlock(struct tasklet_struct *t)
885{
886 smp_mb__before_atomic();
887 clear_bit(TASKLET_STATE_RUN, &t->state);
888 smp_mb__after_atomic();
889 wake_up_var(&t->state);
890}
891EXPORT_SYMBOL_GPL(tasklet_unlock);
892
893void tasklet_unlock_wait(struct tasklet_struct *t)
894{
895 wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
896}
897EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
898#endif
899
900void __init softirq_init(void)
901{
902 int cpu;
903
904 for_each_possible_cpu(cpu) {
905 per_cpu(tasklet_vec, cpu).tail =
906 &per_cpu(tasklet_vec, cpu).head;
907 per_cpu(tasklet_hi_vec, cpu).tail =
908 &per_cpu(tasklet_hi_vec, cpu).head;
909 }
910
911 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
912 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
913}
914
915static int ksoftirqd_should_run(unsigned int cpu)
916{
917 return local_softirq_pending();
918}
919
920static void run_ksoftirqd(unsigned int cpu)
921{
922 ksoftirqd_run_begin();
923 if (local_softirq_pending()) {
924 /*
925 * We can safely run softirq on inline stack, as we are not deep
926 * in the task stack here.
927 */
928 handle_softirqs(true);
929 ksoftirqd_run_end();
930 cond_resched();
931 return;
932 }
933 ksoftirqd_run_end();
934}
935
936#ifdef CONFIG_HOTPLUG_CPU
937static int takeover_tasklets(unsigned int cpu)
938{
939 workqueue_softirq_dead(cpu);
940
941 /* CPU is dead, so no lock needed. */
942 local_irq_disable();
943
944 /* Find end, append list for that CPU. */
945 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
946 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
947 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
948 per_cpu(tasklet_vec, cpu).head = NULL;
949 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
950 }
951 raise_softirq_irqoff(TASKLET_SOFTIRQ);
952
953 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
954 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
955 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
956 per_cpu(tasklet_hi_vec, cpu).head = NULL;
957 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
958 }
959 raise_softirq_irqoff(HI_SOFTIRQ);
960
961 local_irq_enable();
962 return 0;
963}
964#else
965#define takeover_tasklets NULL
966#endif /* CONFIG_HOTPLUG_CPU */
967
968static struct smp_hotplug_thread softirq_threads = {
969 .store = &ksoftirqd,
970 .thread_should_run = ksoftirqd_should_run,
971 .thread_fn = run_ksoftirqd,
972 .thread_comm = "ksoftirqd/%u",
973};
974
975static __init int spawn_ksoftirqd(void)
976{
977 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
978 takeover_tasklets);
979 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
980
981 return 0;
982}
983early_initcall(spawn_ksoftirqd);
984
985/*
986 * [ These __weak aliases are kept in a separate compilation unit, so that
987 * GCC does not inline them incorrectly. ]
988 */
989
990int __init __weak early_irq_init(void)
991{
992 return 0;
993}
994
995int __init __weak arch_probe_nr_irqs(void)
996{
997 return NR_IRQS_LEGACY;
998}
999
1000int __init __weak arch_early_irq_init(void)
1001{
1002 return 0;
1003}
1004
1005unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1006{
1007 return from;
1008}