Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/softirq.c
4 *
5 * Copyright (C) 1992 Linus Torvalds
6 *
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/export.h>
13#include <linux/kernel_stat.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/local_lock.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29#include <linux/wait_bit.h>
30
31#include <asm/softirq_stack.h>
32
33#define CREATE_TRACE_POINTS
34#include <trace/events/irq.h>
35
36/*
37 - No shared variables, all the data are CPU local.
38 - If a softirq needs serialization, let it serialize itself
39 by its own spinlocks.
40 - Even if softirq is serialized, only local cpu is marked for
41 execution. Hence, we get something sort of weak cpu binding.
42 Though it is still not clear, will it result in better locality
43 or will not.
44
45 Examples:
46 - NET RX softirq. It is multithreaded and does not require
47 any global serialization.
48 - NET TX softirq. It kicks software netdevice queues, hence
49 it is logically serialized per device, but this serialization
50 is invisible to common code.
51 - Tasklets: serialized wrt itself.
52 */
53
54#ifndef __ARCH_IRQ_STAT
55DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
56EXPORT_PER_CPU_SYMBOL(irq_stat);
57#endif
58
59static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
60
61DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62
63const char * const softirq_to_name[NR_SOFTIRQS] = {
64 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
65 "TASKLET", "SCHED", "HRTIMER", "RCU"
66};
67
68/*
69 * we cannot loop indefinitely here to avoid userspace starvation,
70 * but we also don't want to introduce a worst case 1/HZ latency
71 * to the pending events, so lets the scheduler to balance
72 * the softirq load for us.
73 */
74static void wakeup_softirqd(void)
75{
76 /* Interrupts are disabled: no need to stop preemption */
77 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
78
79 if (tsk)
80 wake_up_process(tsk);
81}
82
83/*
84 * If ksoftirqd is scheduled, we do not want to process pending softirqs
85 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
86 * unless we're doing some of the synchronous softirqs.
87 */
88#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
89static bool ksoftirqd_running(unsigned long pending)
90{
91 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
92
93 if (pending & SOFTIRQ_NOW_MASK)
94 return false;
95 return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
96}
97
98#ifdef CONFIG_TRACE_IRQFLAGS
99DEFINE_PER_CPU(int, hardirqs_enabled);
100DEFINE_PER_CPU(int, hardirq_context);
101EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
102EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
103#endif
104
105/*
106 * SOFTIRQ_OFFSET usage:
107 *
108 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
109 * to a per CPU counter and to task::softirqs_disabled_cnt.
110 *
111 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
112 * processing.
113 *
114 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
115 * on local_bh_disable or local_bh_enable.
116 *
117 * This lets us distinguish between whether we are currently processing
118 * softirq and whether we just have bh disabled.
119 */
120#ifdef CONFIG_PREEMPT_RT
121
122/*
123 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
124 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
125 * softirq disabled section to be preempted.
126 *
127 * The per task counter is used for softirq_count(), in_softirq() and
128 * in_serving_softirqs() because these counts are only valid when the task
129 * holding softirq_ctrl::lock is running.
130 *
131 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
132 * the task which is in a softirq disabled section is preempted or blocks.
133 */
134struct softirq_ctrl {
135 local_lock_t lock;
136 int cnt;
137};
138
139static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
140 .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
141};
142
143/**
144 * local_bh_blocked() - Check for idle whether BH processing is blocked
145 *
146 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
147 *
148 * This is invoked from the idle task to guard against false positive
149 * softirq pending warnings, which would happen when the task which holds
150 * softirq_ctrl::lock was the only running task on the CPU and blocks on
151 * some other lock.
152 */
153bool local_bh_blocked(void)
154{
155 return __this_cpu_read(softirq_ctrl.cnt) != 0;
156}
157
158void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
159{
160 unsigned long flags;
161 int newcnt;
162
163 WARN_ON_ONCE(in_hardirq());
164
165 /* First entry of a task into a BH disabled section? */
166 if (!current->softirq_disable_cnt) {
167 if (preemptible()) {
168 local_lock(&softirq_ctrl.lock);
169 /* Required to meet the RCU bottomhalf requirements. */
170 rcu_read_lock();
171 } else {
172 DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
173 }
174 }
175
176 /*
177 * Track the per CPU softirq disabled state. On RT this is per CPU
178 * state to allow preemption of bottom half disabled sections.
179 */
180 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
181 /*
182 * Reflect the result in the task state to prevent recursion on the
183 * local lock and to make softirq_count() & al work.
184 */
185 current->softirq_disable_cnt = newcnt;
186
187 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
188 raw_local_irq_save(flags);
189 lockdep_softirqs_off(ip);
190 raw_local_irq_restore(flags);
191 }
192}
193EXPORT_SYMBOL(__local_bh_disable_ip);
194
195static void __local_bh_enable(unsigned int cnt, bool unlock)
196{
197 unsigned long flags;
198 int newcnt;
199
200 DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
201 this_cpu_read(softirq_ctrl.cnt));
202
203 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
204 raw_local_irq_save(flags);
205 lockdep_softirqs_on(_RET_IP_);
206 raw_local_irq_restore(flags);
207 }
208
209 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
210 current->softirq_disable_cnt = newcnt;
211
212 if (!newcnt && unlock) {
213 rcu_read_unlock();
214 local_unlock(&softirq_ctrl.lock);
215 }
216}
217
218void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
219{
220 bool preempt_on = preemptible();
221 unsigned long flags;
222 u32 pending;
223 int curcnt;
224
225 WARN_ON_ONCE(in_hardirq());
226 lockdep_assert_irqs_enabled();
227
228 local_irq_save(flags);
229 curcnt = __this_cpu_read(softirq_ctrl.cnt);
230
231 /*
232 * If this is not reenabling soft interrupts, no point in trying to
233 * run pending ones.
234 */
235 if (curcnt != cnt)
236 goto out;
237
238 pending = local_softirq_pending();
239 if (!pending || ksoftirqd_running(pending))
240 goto out;
241
242 /*
243 * If this was called from non preemptible context, wake up the
244 * softirq daemon.
245 */
246 if (!preempt_on) {
247 wakeup_softirqd();
248 goto out;
249 }
250
251 /*
252 * Adjust softirq count to SOFTIRQ_OFFSET which makes
253 * in_serving_softirq() become true.
254 */
255 cnt = SOFTIRQ_OFFSET;
256 __local_bh_enable(cnt, false);
257 __do_softirq();
258
259out:
260 __local_bh_enable(cnt, preempt_on);
261 local_irq_restore(flags);
262}
263EXPORT_SYMBOL(__local_bh_enable_ip);
264
265/*
266 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
267 * to acquire the per CPU local lock for reentrancy protection.
268 */
269static inline void ksoftirqd_run_begin(void)
270{
271 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
272 local_irq_disable();
273}
274
275/* Counterpart to ksoftirqd_run_begin() */
276static inline void ksoftirqd_run_end(void)
277{
278 __local_bh_enable(SOFTIRQ_OFFSET, true);
279 WARN_ON_ONCE(in_interrupt());
280 local_irq_enable();
281}
282
283static inline void softirq_handle_begin(void) { }
284static inline void softirq_handle_end(void) { }
285
286static inline bool should_wake_ksoftirqd(void)
287{
288 return !this_cpu_read(softirq_ctrl.cnt);
289}
290
291static inline void invoke_softirq(void)
292{
293 if (should_wake_ksoftirqd())
294 wakeup_softirqd();
295}
296
297/*
298 * flush_smp_call_function_queue() can raise a soft interrupt in a function
299 * call. On RT kernels this is undesired and the only known functionality
300 * in the block layer which does this is disabled on RT. If soft interrupts
301 * get raised which haven't been raised before the flush, warn so it can be
302 * investigated.
303 */
304void do_softirq_post_smp_call_flush(unsigned int was_pending)
305{
306 if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
307 invoke_softirq();
308}
309
310#else /* CONFIG_PREEMPT_RT */
311
312/*
313 * This one is for softirq.c-internal use, where hardirqs are disabled
314 * legitimately:
315 */
316#ifdef CONFIG_TRACE_IRQFLAGS
317void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
318{
319 unsigned long flags;
320
321 WARN_ON_ONCE(in_hardirq());
322
323 raw_local_irq_save(flags);
324 /*
325 * The preempt tracer hooks into preempt_count_add and will break
326 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
327 * is set and before current->softirq_enabled is cleared.
328 * We must manually increment preempt_count here and manually
329 * call the trace_preempt_off later.
330 */
331 __preempt_count_add(cnt);
332 /*
333 * Were softirqs turned off above:
334 */
335 if (softirq_count() == (cnt & SOFTIRQ_MASK))
336 lockdep_softirqs_off(ip);
337 raw_local_irq_restore(flags);
338
339 if (preempt_count() == cnt) {
340#ifdef CONFIG_DEBUG_PREEMPT
341 current->preempt_disable_ip = get_lock_parent_ip();
342#endif
343 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
344 }
345}
346EXPORT_SYMBOL(__local_bh_disable_ip);
347#endif /* CONFIG_TRACE_IRQFLAGS */
348
349static void __local_bh_enable(unsigned int cnt)
350{
351 lockdep_assert_irqs_disabled();
352
353 if (preempt_count() == cnt)
354 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
355
356 if (softirq_count() == (cnt & SOFTIRQ_MASK))
357 lockdep_softirqs_on(_RET_IP_);
358
359 __preempt_count_sub(cnt);
360}
361
362/*
363 * Special-case - softirqs can safely be enabled by __do_softirq(),
364 * without processing still-pending softirqs:
365 */
366void _local_bh_enable(void)
367{
368 WARN_ON_ONCE(in_hardirq());
369 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
370}
371EXPORT_SYMBOL(_local_bh_enable);
372
373void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
374{
375 WARN_ON_ONCE(in_hardirq());
376 lockdep_assert_irqs_enabled();
377#ifdef CONFIG_TRACE_IRQFLAGS
378 local_irq_disable();
379#endif
380 /*
381 * Are softirqs going to be turned on now:
382 */
383 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
384 lockdep_softirqs_on(ip);
385 /*
386 * Keep preemption disabled until we are done with
387 * softirq processing:
388 */
389 __preempt_count_sub(cnt - 1);
390
391 if (unlikely(!in_interrupt() && local_softirq_pending())) {
392 /*
393 * Run softirq if any pending. And do it in its own stack
394 * as we may be calling this deep in a task call stack already.
395 */
396 do_softirq();
397 }
398
399 preempt_count_dec();
400#ifdef CONFIG_TRACE_IRQFLAGS
401 local_irq_enable();
402#endif
403 preempt_check_resched();
404}
405EXPORT_SYMBOL(__local_bh_enable_ip);
406
407static inline void softirq_handle_begin(void)
408{
409 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
410}
411
412static inline void softirq_handle_end(void)
413{
414 __local_bh_enable(SOFTIRQ_OFFSET);
415 WARN_ON_ONCE(in_interrupt());
416}
417
418static inline void ksoftirqd_run_begin(void)
419{
420 local_irq_disable();
421}
422
423static inline void ksoftirqd_run_end(void)
424{
425 local_irq_enable();
426}
427
428static inline bool should_wake_ksoftirqd(void)
429{
430 return true;
431}
432
433static inline void invoke_softirq(void)
434{
435 if (ksoftirqd_running(local_softirq_pending()))
436 return;
437
438 if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
439#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
440 /*
441 * We can safely execute softirq on the current stack if
442 * it is the irq stack, because it should be near empty
443 * at this stage.
444 */
445 __do_softirq();
446#else
447 /*
448 * Otherwise, irq_exit() is called on the task stack that can
449 * be potentially deep already. So call softirq in its own stack
450 * to prevent from any overrun.
451 */
452 do_softirq_own_stack();
453#endif
454 } else {
455 wakeup_softirqd();
456 }
457}
458
459asmlinkage __visible void do_softirq(void)
460{
461 __u32 pending;
462 unsigned long flags;
463
464 if (in_interrupt())
465 return;
466
467 local_irq_save(flags);
468
469 pending = local_softirq_pending();
470
471 if (pending && !ksoftirqd_running(pending))
472 do_softirq_own_stack();
473
474 local_irq_restore(flags);
475}
476
477#endif /* !CONFIG_PREEMPT_RT */
478
479/*
480 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
481 * but break the loop if need_resched() is set or after 2 ms.
482 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
483 * certain cases, such as stop_machine(), jiffies may cease to
484 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
485 * well to make sure we eventually return from this method.
486 *
487 * These limits have been established via experimentation.
488 * The two things to balance is latency against fairness -
489 * we want to handle softirqs as soon as possible, but they
490 * should not be able to lock up the box.
491 */
492#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
493#define MAX_SOFTIRQ_RESTART 10
494
495#ifdef CONFIG_TRACE_IRQFLAGS
496/*
497 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
498 * to keep the lockdep irq context tracking as tight as possible in order to
499 * not miss-qualify lock contexts and miss possible deadlocks.
500 */
501
502static inline bool lockdep_softirq_start(void)
503{
504 bool in_hardirq = false;
505
506 if (lockdep_hardirq_context()) {
507 in_hardirq = true;
508 lockdep_hardirq_exit();
509 }
510
511 lockdep_softirq_enter();
512
513 return in_hardirq;
514}
515
516static inline void lockdep_softirq_end(bool in_hardirq)
517{
518 lockdep_softirq_exit();
519
520 if (in_hardirq)
521 lockdep_hardirq_enter();
522}
523#else
524static inline bool lockdep_softirq_start(void) { return false; }
525static inline void lockdep_softirq_end(bool in_hardirq) { }
526#endif
527
528asmlinkage __visible void __softirq_entry __do_softirq(void)
529{
530 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
531 unsigned long old_flags = current->flags;
532 int max_restart = MAX_SOFTIRQ_RESTART;
533 struct softirq_action *h;
534 bool in_hardirq;
535 __u32 pending;
536 int softirq_bit;
537
538 /*
539 * Mask out PF_MEMALLOC as the current task context is borrowed for the
540 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
541 * again if the socket is related to swapping.
542 */
543 current->flags &= ~PF_MEMALLOC;
544
545 pending = local_softirq_pending();
546
547 softirq_handle_begin();
548 in_hardirq = lockdep_softirq_start();
549 account_softirq_enter(current);
550
551restart:
552 /* Reset the pending bitmask before enabling irqs */
553 set_softirq_pending(0);
554
555 local_irq_enable();
556
557 h = softirq_vec;
558
559 while ((softirq_bit = ffs(pending))) {
560 unsigned int vec_nr;
561 int prev_count;
562
563 h += softirq_bit - 1;
564
565 vec_nr = h - softirq_vec;
566 prev_count = preempt_count();
567
568 kstat_incr_softirqs_this_cpu(vec_nr);
569
570 trace_softirq_entry(vec_nr);
571 h->action(h);
572 trace_softirq_exit(vec_nr);
573 if (unlikely(prev_count != preempt_count())) {
574 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
575 vec_nr, softirq_to_name[vec_nr], h->action,
576 prev_count, preempt_count());
577 preempt_count_set(prev_count);
578 }
579 h++;
580 pending >>= softirq_bit;
581 }
582
583 if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
584 __this_cpu_read(ksoftirqd) == current)
585 rcu_softirq_qs();
586
587 local_irq_disable();
588
589 pending = local_softirq_pending();
590 if (pending) {
591 if (time_before(jiffies, end) && !need_resched() &&
592 --max_restart)
593 goto restart;
594
595 wakeup_softirqd();
596 }
597
598 account_softirq_exit(current);
599 lockdep_softirq_end(in_hardirq);
600 softirq_handle_end();
601 current_restore_flags(old_flags, PF_MEMALLOC);
602}
603
604/**
605 * irq_enter_rcu - Enter an interrupt context with RCU watching
606 */
607void irq_enter_rcu(void)
608{
609 __irq_enter_raw();
610
611 if (tick_nohz_full_cpu(smp_processor_id()) ||
612 (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
613 tick_irq_enter();
614
615 account_hardirq_enter(current);
616}
617
618/**
619 * irq_enter - Enter an interrupt context including RCU update
620 */
621void irq_enter(void)
622{
623 ct_irq_enter();
624 irq_enter_rcu();
625}
626
627static inline void tick_irq_exit(void)
628{
629#ifdef CONFIG_NO_HZ_COMMON
630 int cpu = smp_processor_id();
631
632 /* Make sure that timer wheel updates are propagated */
633 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
634 if (!in_hardirq())
635 tick_nohz_irq_exit();
636 }
637#endif
638}
639
640static inline void __irq_exit_rcu(void)
641{
642#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
643 local_irq_disable();
644#else
645 lockdep_assert_irqs_disabled();
646#endif
647 account_hardirq_exit(current);
648 preempt_count_sub(HARDIRQ_OFFSET);
649 if (!in_interrupt() && local_softirq_pending())
650 invoke_softirq();
651
652 tick_irq_exit();
653}
654
655/**
656 * irq_exit_rcu() - Exit an interrupt context without updating RCU
657 *
658 * Also processes softirqs if needed and possible.
659 */
660void irq_exit_rcu(void)
661{
662 __irq_exit_rcu();
663 /* must be last! */
664 lockdep_hardirq_exit();
665}
666
667/**
668 * irq_exit - Exit an interrupt context, update RCU and lockdep
669 *
670 * Also processes softirqs if needed and possible.
671 */
672void irq_exit(void)
673{
674 __irq_exit_rcu();
675 ct_irq_exit();
676 /* must be last! */
677 lockdep_hardirq_exit();
678}
679
680/*
681 * This function must run with irqs disabled!
682 */
683inline void raise_softirq_irqoff(unsigned int nr)
684{
685 __raise_softirq_irqoff(nr);
686
687 /*
688 * If we're in an interrupt or softirq, we're done
689 * (this also catches softirq-disabled code). We will
690 * actually run the softirq once we return from
691 * the irq or softirq.
692 *
693 * Otherwise we wake up ksoftirqd to make sure we
694 * schedule the softirq soon.
695 */
696 if (!in_interrupt() && should_wake_ksoftirqd())
697 wakeup_softirqd();
698}
699
700void raise_softirq(unsigned int nr)
701{
702 unsigned long flags;
703
704 local_irq_save(flags);
705 raise_softirq_irqoff(nr);
706 local_irq_restore(flags);
707}
708
709void __raise_softirq_irqoff(unsigned int nr)
710{
711 lockdep_assert_irqs_disabled();
712 trace_softirq_raise(nr);
713 or_softirq_pending(1UL << nr);
714}
715
716void open_softirq(int nr, void (*action)(struct softirq_action *))
717{
718 softirq_vec[nr].action = action;
719}
720
721/*
722 * Tasklets
723 */
724struct tasklet_head {
725 struct tasklet_struct *head;
726 struct tasklet_struct **tail;
727};
728
729static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
730static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
731
732static void __tasklet_schedule_common(struct tasklet_struct *t,
733 struct tasklet_head __percpu *headp,
734 unsigned int softirq_nr)
735{
736 struct tasklet_head *head;
737 unsigned long flags;
738
739 local_irq_save(flags);
740 head = this_cpu_ptr(headp);
741 t->next = NULL;
742 *head->tail = t;
743 head->tail = &(t->next);
744 raise_softirq_irqoff(softirq_nr);
745 local_irq_restore(flags);
746}
747
748void __tasklet_schedule(struct tasklet_struct *t)
749{
750 __tasklet_schedule_common(t, &tasklet_vec,
751 TASKLET_SOFTIRQ);
752}
753EXPORT_SYMBOL(__tasklet_schedule);
754
755void __tasklet_hi_schedule(struct tasklet_struct *t)
756{
757 __tasklet_schedule_common(t, &tasklet_hi_vec,
758 HI_SOFTIRQ);
759}
760EXPORT_SYMBOL(__tasklet_hi_schedule);
761
762static bool tasklet_clear_sched(struct tasklet_struct *t)
763{
764 if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
765 wake_up_var(&t->state);
766 return true;
767 }
768
769 WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
770 t->use_callback ? "callback" : "func",
771 t->use_callback ? (void *)t->callback : (void *)t->func);
772
773 return false;
774}
775
776static void tasklet_action_common(struct softirq_action *a,
777 struct tasklet_head *tl_head,
778 unsigned int softirq_nr)
779{
780 struct tasklet_struct *list;
781
782 local_irq_disable();
783 list = tl_head->head;
784 tl_head->head = NULL;
785 tl_head->tail = &tl_head->head;
786 local_irq_enable();
787
788 while (list) {
789 struct tasklet_struct *t = list;
790
791 list = list->next;
792
793 if (tasklet_trylock(t)) {
794 if (!atomic_read(&t->count)) {
795 if (tasklet_clear_sched(t)) {
796 if (t->use_callback)
797 t->callback(t);
798 else
799 t->func(t->data);
800 }
801 tasklet_unlock(t);
802 continue;
803 }
804 tasklet_unlock(t);
805 }
806
807 local_irq_disable();
808 t->next = NULL;
809 *tl_head->tail = t;
810 tl_head->tail = &t->next;
811 __raise_softirq_irqoff(softirq_nr);
812 local_irq_enable();
813 }
814}
815
816static __latent_entropy void tasklet_action(struct softirq_action *a)
817{
818 tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
819}
820
821static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
822{
823 tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
824}
825
826void tasklet_setup(struct tasklet_struct *t,
827 void (*callback)(struct tasklet_struct *))
828{
829 t->next = NULL;
830 t->state = 0;
831 atomic_set(&t->count, 0);
832 t->callback = callback;
833 t->use_callback = true;
834 t->data = 0;
835}
836EXPORT_SYMBOL(tasklet_setup);
837
838void tasklet_init(struct tasklet_struct *t,
839 void (*func)(unsigned long), unsigned long data)
840{
841 t->next = NULL;
842 t->state = 0;
843 atomic_set(&t->count, 0);
844 t->func = func;
845 t->use_callback = false;
846 t->data = data;
847}
848EXPORT_SYMBOL(tasklet_init);
849
850#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
851/*
852 * Do not use in new code. Waiting for tasklets from atomic contexts is
853 * error prone and should be avoided.
854 */
855void tasklet_unlock_spin_wait(struct tasklet_struct *t)
856{
857 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
858 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
859 /*
860 * Prevent a live lock when current preempted soft
861 * interrupt processing or prevents ksoftirqd from
862 * running. If the tasklet runs on a different CPU
863 * then this has no effect other than doing the BH
864 * disable/enable dance for nothing.
865 */
866 local_bh_disable();
867 local_bh_enable();
868 } else {
869 cpu_relax();
870 }
871 }
872}
873EXPORT_SYMBOL(tasklet_unlock_spin_wait);
874#endif
875
876void tasklet_kill(struct tasklet_struct *t)
877{
878 if (in_interrupt())
879 pr_notice("Attempt to kill tasklet from interrupt\n");
880
881 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
882 wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
883
884 tasklet_unlock_wait(t);
885 tasklet_clear_sched(t);
886}
887EXPORT_SYMBOL(tasklet_kill);
888
889#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
890void tasklet_unlock(struct tasklet_struct *t)
891{
892 smp_mb__before_atomic();
893 clear_bit(TASKLET_STATE_RUN, &t->state);
894 smp_mb__after_atomic();
895 wake_up_var(&t->state);
896}
897EXPORT_SYMBOL_GPL(tasklet_unlock);
898
899void tasklet_unlock_wait(struct tasklet_struct *t)
900{
901 wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
902}
903EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
904#endif
905
906void __init softirq_init(void)
907{
908 int cpu;
909
910 for_each_possible_cpu(cpu) {
911 per_cpu(tasklet_vec, cpu).tail =
912 &per_cpu(tasklet_vec, cpu).head;
913 per_cpu(tasklet_hi_vec, cpu).tail =
914 &per_cpu(tasklet_hi_vec, cpu).head;
915 }
916
917 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
918 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
919}
920
921static int ksoftirqd_should_run(unsigned int cpu)
922{
923 return local_softirq_pending();
924}
925
926static void run_ksoftirqd(unsigned int cpu)
927{
928 ksoftirqd_run_begin();
929 if (local_softirq_pending()) {
930 /*
931 * We can safely run softirq on inline stack, as we are not deep
932 * in the task stack here.
933 */
934 __do_softirq();
935 ksoftirqd_run_end();
936 cond_resched();
937 return;
938 }
939 ksoftirqd_run_end();
940}
941
942#ifdef CONFIG_HOTPLUG_CPU
943static int takeover_tasklets(unsigned int cpu)
944{
945 /* CPU is dead, so no lock needed. */
946 local_irq_disable();
947
948 /* Find end, append list for that CPU. */
949 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
950 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
951 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
952 per_cpu(tasklet_vec, cpu).head = NULL;
953 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
954 }
955 raise_softirq_irqoff(TASKLET_SOFTIRQ);
956
957 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
958 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
959 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
960 per_cpu(tasklet_hi_vec, cpu).head = NULL;
961 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
962 }
963 raise_softirq_irqoff(HI_SOFTIRQ);
964
965 local_irq_enable();
966 return 0;
967}
968#else
969#define takeover_tasklets NULL
970#endif /* CONFIG_HOTPLUG_CPU */
971
972static struct smp_hotplug_thread softirq_threads = {
973 .store = &ksoftirqd,
974 .thread_should_run = ksoftirqd_should_run,
975 .thread_fn = run_ksoftirqd,
976 .thread_comm = "ksoftirqd/%u",
977};
978
979static __init int spawn_ksoftirqd(void)
980{
981 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
982 takeover_tasklets);
983 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
984
985 return 0;
986}
987early_initcall(spawn_ksoftirqd);
988
989/*
990 * [ These __weak aliases are kept in a separate compilation unit, so that
991 * GCC does not inline them incorrectly. ]
992 */
993
994int __init __weak early_irq_init(void)
995{
996 return 0;
997}
998
999int __init __weak arch_probe_nr_irqs(void)
1000{
1001 return NR_IRQS_LEGACY;
1002}
1003
1004int __init __weak arch_early_irq_init(void)
1005{
1006 return 0;
1007}
1008
1009unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1010{
1011 return from;
1012}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/softirq.c
4 *
5 * Copyright (C) 1992 Linus Torvalds
6 *
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/export.h>
13#include <linux/kernel_stat.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/local_lock.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
21#include <linux/freezer.h>
22#include <linux/kthread.h>
23#include <linux/rcupdate.h>
24#include <linux/ftrace.h>
25#include <linux/smp.h>
26#include <linux/smpboot.h>
27#include <linux/tick.h>
28#include <linux/irq.h>
29#include <linux/wait_bit.h>
30#include <linux/workqueue.h>
31
32#include <asm/softirq_stack.h>
33
34#define CREATE_TRACE_POINTS
35#include <trace/events/irq.h>
36
37/*
38 - No shared variables, all the data are CPU local.
39 - If a softirq needs serialization, let it serialize itself
40 by its own spinlocks.
41 - Even if softirq is serialized, only local cpu is marked for
42 execution. Hence, we get something sort of weak cpu binding.
43 Though it is still not clear, will it result in better locality
44 or will not.
45
46 Examples:
47 - NET RX softirq. It is multithreaded and does not require
48 any global serialization.
49 - NET TX softirq. It kicks software netdevice queues, hence
50 it is logically serialized per device, but this serialization
51 is invisible to common code.
52 - Tasklets: serialized wrt itself.
53 */
54
55#ifndef __ARCH_IRQ_STAT
56DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
57EXPORT_PER_CPU_SYMBOL(irq_stat);
58#endif
59
60static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
61
62DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
63
64const char * const softirq_to_name[NR_SOFTIRQS] = {
65 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
66 "TASKLET", "SCHED", "HRTIMER", "RCU"
67};
68
69/*
70 * we cannot loop indefinitely here to avoid userspace starvation,
71 * but we also don't want to introduce a worst case 1/HZ latency
72 * to the pending events, so lets the scheduler to balance
73 * the softirq load for us.
74 */
75static void wakeup_softirqd(void)
76{
77 /* Interrupts are disabled: no need to stop preemption */
78 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
79
80 if (tsk)
81 wake_up_process(tsk);
82}
83
84#ifdef CONFIG_TRACE_IRQFLAGS
85DEFINE_PER_CPU(int, hardirqs_enabled);
86DEFINE_PER_CPU(int, hardirq_context);
87EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
88EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
89#endif
90
91/*
92 * SOFTIRQ_OFFSET usage:
93 *
94 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
95 * to a per CPU counter and to task::softirqs_disabled_cnt.
96 *
97 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
98 * processing.
99 *
100 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
101 * on local_bh_disable or local_bh_enable.
102 *
103 * This lets us distinguish between whether we are currently processing
104 * softirq and whether we just have bh disabled.
105 */
106#ifdef CONFIG_PREEMPT_RT
107
108/*
109 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
110 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
111 * softirq disabled section to be preempted.
112 *
113 * The per task counter is used for softirq_count(), in_softirq() and
114 * in_serving_softirqs() because these counts are only valid when the task
115 * holding softirq_ctrl::lock is running.
116 *
117 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
118 * the task which is in a softirq disabled section is preempted or blocks.
119 */
120struct softirq_ctrl {
121 local_lock_t lock;
122 int cnt;
123};
124
125static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
126 .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
127};
128
129/**
130 * local_bh_blocked() - Check for idle whether BH processing is blocked
131 *
132 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
133 *
134 * This is invoked from the idle task to guard against false positive
135 * softirq pending warnings, which would happen when the task which holds
136 * softirq_ctrl::lock was the only running task on the CPU and blocks on
137 * some other lock.
138 */
139bool local_bh_blocked(void)
140{
141 return __this_cpu_read(softirq_ctrl.cnt) != 0;
142}
143
144void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
145{
146 unsigned long flags;
147 int newcnt;
148
149 WARN_ON_ONCE(in_hardirq());
150
151 /* First entry of a task into a BH disabled section? */
152 if (!current->softirq_disable_cnt) {
153 if (preemptible()) {
154 local_lock(&softirq_ctrl.lock);
155 /* Required to meet the RCU bottomhalf requirements. */
156 rcu_read_lock();
157 } else {
158 DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
159 }
160 }
161
162 /*
163 * Track the per CPU softirq disabled state. On RT this is per CPU
164 * state to allow preemption of bottom half disabled sections.
165 */
166 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
167 /*
168 * Reflect the result in the task state to prevent recursion on the
169 * local lock and to make softirq_count() & al work.
170 */
171 current->softirq_disable_cnt = newcnt;
172
173 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
174 raw_local_irq_save(flags);
175 lockdep_softirqs_off(ip);
176 raw_local_irq_restore(flags);
177 }
178}
179EXPORT_SYMBOL(__local_bh_disable_ip);
180
181static void __local_bh_enable(unsigned int cnt, bool unlock)
182{
183 unsigned long flags;
184 int newcnt;
185
186 DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
187 this_cpu_read(softirq_ctrl.cnt));
188
189 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
190 raw_local_irq_save(flags);
191 lockdep_softirqs_on(_RET_IP_);
192 raw_local_irq_restore(flags);
193 }
194
195 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
196 current->softirq_disable_cnt = newcnt;
197
198 if (!newcnt && unlock) {
199 rcu_read_unlock();
200 local_unlock(&softirq_ctrl.lock);
201 }
202}
203
204void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
205{
206 bool preempt_on = preemptible();
207 unsigned long flags;
208 u32 pending;
209 int curcnt;
210
211 WARN_ON_ONCE(in_hardirq());
212 lockdep_assert_irqs_enabled();
213
214 local_irq_save(flags);
215 curcnt = __this_cpu_read(softirq_ctrl.cnt);
216
217 /*
218 * If this is not reenabling soft interrupts, no point in trying to
219 * run pending ones.
220 */
221 if (curcnt != cnt)
222 goto out;
223
224 pending = local_softirq_pending();
225 if (!pending)
226 goto out;
227
228 /*
229 * If this was called from non preemptible context, wake up the
230 * softirq daemon.
231 */
232 if (!preempt_on) {
233 wakeup_softirqd();
234 goto out;
235 }
236
237 /*
238 * Adjust softirq count to SOFTIRQ_OFFSET which makes
239 * in_serving_softirq() become true.
240 */
241 cnt = SOFTIRQ_OFFSET;
242 __local_bh_enable(cnt, false);
243 __do_softirq();
244
245out:
246 __local_bh_enable(cnt, preempt_on);
247 local_irq_restore(flags);
248}
249EXPORT_SYMBOL(__local_bh_enable_ip);
250
251/*
252 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
253 * to acquire the per CPU local lock for reentrancy protection.
254 */
255static inline void ksoftirqd_run_begin(void)
256{
257 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
258 local_irq_disable();
259}
260
261/* Counterpart to ksoftirqd_run_begin() */
262static inline void ksoftirqd_run_end(void)
263{
264 __local_bh_enable(SOFTIRQ_OFFSET, true);
265 WARN_ON_ONCE(in_interrupt());
266 local_irq_enable();
267}
268
269static inline void softirq_handle_begin(void) { }
270static inline void softirq_handle_end(void) { }
271
272static inline bool should_wake_ksoftirqd(void)
273{
274 return !this_cpu_read(softirq_ctrl.cnt);
275}
276
277static inline void invoke_softirq(void)
278{
279 if (should_wake_ksoftirqd())
280 wakeup_softirqd();
281}
282
283/*
284 * flush_smp_call_function_queue() can raise a soft interrupt in a function
285 * call. On RT kernels this is undesired and the only known functionality
286 * in the block layer which does this is disabled on RT. If soft interrupts
287 * get raised which haven't been raised before the flush, warn so it can be
288 * investigated.
289 */
290void do_softirq_post_smp_call_flush(unsigned int was_pending)
291{
292 if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
293 invoke_softirq();
294}
295
296#else /* CONFIG_PREEMPT_RT */
297
298/*
299 * This one is for softirq.c-internal use, where hardirqs are disabled
300 * legitimately:
301 */
302#ifdef CONFIG_TRACE_IRQFLAGS
303void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
304{
305 unsigned long flags;
306
307 WARN_ON_ONCE(in_hardirq());
308
309 raw_local_irq_save(flags);
310 /*
311 * The preempt tracer hooks into preempt_count_add and will break
312 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
313 * is set and before current->softirq_enabled is cleared.
314 * We must manually increment preempt_count here and manually
315 * call the trace_preempt_off later.
316 */
317 __preempt_count_add(cnt);
318 /*
319 * Were softirqs turned off above:
320 */
321 if (softirq_count() == (cnt & SOFTIRQ_MASK))
322 lockdep_softirqs_off(ip);
323 raw_local_irq_restore(flags);
324
325 if (preempt_count() == cnt) {
326#ifdef CONFIG_DEBUG_PREEMPT
327 current->preempt_disable_ip = get_lock_parent_ip();
328#endif
329 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
330 }
331}
332EXPORT_SYMBOL(__local_bh_disable_ip);
333#endif /* CONFIG_TRACE_IRQFLAGS */
334
335static void __local_bh_enable(unsigned int cnt)
336{
337 lockdep_assert_irqs_disabled();
338
339 if (preempt_count() == cnt)
340 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
341
342 if (softirq_count() == (cnt & SOFTIRQ_MASK))
343 lockdep_softirqs_on(_RET_IP_);
344
345 __preempt_count_sub(cnt);
346}
347
348/*
349 * Special-case - softirqs can safely be enabled by __do_softirq(),
350 * without processing still-pending softirqs:
351 */
352void _local_bh_enable(void)
353{
354 WARN_ON_ONCE(in_hardirq());
355 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
356}
357EXPORT_SYMBOL(_local_bh_enable);
358
359void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
360{
361 WARN_ON_ONCE(in_hardirq());
362 lockdep_assert_irqs_enabled();
363#ifdef CONFIG_TRACE_IRQFLAGS
364 local_irq_disable();
365#endif
366 /*
367 * Are softirqs going to be turned on now:
368 */
369 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
370 lockdep_softirqs_on(ip);
371 /*
372 * Keep preemption disabled until we are done with
373 * softirq processing:
374 */
375 __preempt_count_sub(cnt - 1);
376
377 if (unlikely(!in_interrupt() && local_softirq_pending())) {
378 /*
379 * Run softirq if any pending. And do it in its own stack
380 * as we may be calling this deep in a task call stack already.
381 */
382 do_softirq();
383 }
384
385 preempt_count_dec();
386#ifdef CONFIG_TRACE_IRQFLAGS
387 local_irq_enable();
388#endif
389 preempt_check_resched();
390}
391EXPORT_SYMBOL(__local_bh_enable_ip);
392
393static inline void softirq_handle_begin(void)
394{
395 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
396}
397
398static inline void softirq_handle_end(void)
399{
400 __local_bh_enable(SOFTIRQ_OFFSET);
401 WARN_ON_ONCE(in_interrupt());
402}
403
404static inline void ksoftirqd_run_begin(void)
405{
406 local_irq_disable();
407}
408
409static inline void ksoftirqd_run_end(void)
410{
411 local_irq_enable();
412}
413
414static inline bool should_wake_ksoftirqd(void)
415{
416 return true;
417}
418
419static inline void invoke_softirq(void)
420{
421 if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
422#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
423 /*
424 * We can safely execute softirq on the current stack if
425 * it is the irq stack, because it should be near empty
426 * at this stage.
427 */
428 __do_softirq();
429#else
430 /*
431 * Otherwise, irq_exit() is called on the task stack that can
432 * be potentially deep already. So call softirq in its own stack
433 * to prevent from any overrun.
434 */
435 do_softirq_own_stack();
436#endif
437 } else {
438 wakeup_softirqd();
439 }
440}
441
442asmlinkage __visible void do_softirq(void)
443{
444 __u32 pending;
445 unsigned long flags;
446
447 if (in_interrupt())
448 return;
449
450 local_irq_save(flags);
451
452 pending = local_softirq_pending();
453
454 if (pending)
455 do_softirq_own_stack();
456
457 local_irq_restore(flags);
458}
459
460#endif /* !CONFIG_PREEMPT_RT */
461
462/*
463 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
464 * but break the loop if need_resched() is set or after 2 ms.
465 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
466 * certain cases, such as stop_machine(), jiffies may cease to
467 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
468 * well to make sure we eventually return from this method.
469 *
470 * These limits have been established via experimentation.
471 * The two things to balance is latency against fairness -
472 * we want to handle softirqs as soon as possible, but they
473 * should not be able to lock up the box.
474 */
475#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
476#define MAX_SOFTIRQ_RESTART 10
477
478#ifdef CONFIG_TRACE_IRQFLAGS
479/*
480 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
481 * to keep the lockdep irq context tracking as tight as possible in order to
482 * not miss-qualify lock contexts and miss possible deadlocks.
483 */
484
485static inline bool lockdep_softirq_start(void)
486{
487 bool in_hardirq = false;
488
489 if (lockdep_hardirq_context()) {
490 in_hardirq = true;
491 lockdep_hardirq_exit();
492 }
493
494 lockdep_softirq_enter();
495
496 return in_hardirq;
497}
498
499static inline void lockdep_softirq_end(bool in_hardirq)
500{
501 lockdep_softirq_exit();
502
503 if (in_hardirq)
504 lockdep_hardirq_enter();
505}
506#else
507static inline bool lockdep_softirq_start(void) { return false; }
508static inline void lockdep_softirq_end(bool in_hardirq) { }
509#endif
510
511static void handle_softirqs(bool ksirqd)
512{
513 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
514 unsigned long old_flags = current->flags;
515 int max_restart = MAX_SOFTIRQ_RESTART;
516 struct softirq_action *h;
517 bool in_hardirq;
518 __u32 pending;
519 int softirq_bit;
520
521 /*
522 * Mask out PF_MEMALLOC as the current task context is borrowed for the
523 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
524 * again if the socket is related to swapping.
525 */
526 current->flags &= ~PF_MEMALLOC;
527
528 pending = local_softirq_pending();
529
530 softirq_handle_begin();
531 in_hardirq = lockdep_softirq_start();
532 account_softirq_enter(current);
533
534restart:
535 /* Reset the pending bitmask before enabling irqs */
536 set_softirq_pending(0);
537
538 local_irq_enable();
539
540 h = softirq_vec;
541
542 while ((softirq_bit = ffs(pending))) {
543 unsigned int vec_nr;
544 int prev_count;
545
546 h += softirq_bit - 1;
547
548 vec_nr = h - softirq_vec;
549 prev_count = preempt_count();
550
551 kstat_incr_softirqs_this_cpu(vec_nr);
552
553 trace_softirq_entry(vec_nr);
554 h->action(h);
555 trace_softirq_exit(vec_nr);
556 if (unlikely(prev_count != preempt_count())) {
557 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
558 vec_nr, softirq_to_name[vec_nr], h->action,
559 prev_count, preempt_count());
560 preempt_count_set(prev_count);
561 }
562 h++;
563 pending >>= softirq_bit;
564 }
565
566 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
567 rcu_softirq_qs();
568
569 local_irq_disable();
570
571 pending = local_softirq_pending();
572 if (pending) {
573 if (time_before(jiffies, end) && !need_resched() &&
574 --max_restart)
575 goto restart;
576
577 wakeup_softirqd();
578 }
579
580 account_softirq_exit(current);
581 lockdep_softirq_end(in_hardirq);
582 softirq_handle_end();
583 current_restore_flags(old_flags, PF_MEMALLOC);
584}
585
586asmlinkage __visible void __softirq_entry __do_softirq(void)
587{
588 handle_softirqs(false);
589}
590
591/**
592 * irq_enter_rcu - Enter an interrupt context with RCU watching
593 */
594void irq_enter_rcu(void)
595{
596 __irq_enter_raw();
597
598 if (tick_nohz_full_cpu(smp_processor_id()) ||
599 (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
600 tick_irq_enter();
601
602 account_hardirq_enter(current);
603}
604
605/**
606 * irq_enter - Enter an interrupt context including RCU update
607 */
608void irq_enter(void)
609{
610 ct_irq_enter();
611 irq_enter_rcu();
612}
613
614static inline void tick_irq_exit(void)
615{
616#ifdef CONFIG_NO_HZ_COMMON
617 int cpu = smp_processor_id();
618
619 /* Make sure that timer wheel updates are propagated */
620 if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
621 if (!in_hardirq())
622 tick_nohz_irq_exit();
623 }
624#endif
625}
626
627static inline void __irq_exit_rcu(void)
628{
629#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
630 local_irq_disable();
631#else
632 lockdep_assert_irqs_disabled();
633#endif
634 account_hardirq_exit(current);
635 preempt_count_sub(HARDIRQ_OFFSET);
636 if (!in_interrupt() && local_softirq_pending())
637 invoke_softirq();
638
639 tick_irq_exit();
640}
641
642/**
643 * irq_exit_rcu() - Exit an interrupt context without updating RCU
644 *
645 * Also processes softirqs if needed and possible.
646 */
647void irq_exit_rcu(void)
648{
649 __irq_exit_rcu();
650 /* must be last! */
651 lockdep_hardirq_exit();
652}
653
654/**
655 * irq_exit - Exit an interrupt context, update RCU and lockdep
656 *
657 * Also processes softirqs if needed and possible.
658 */
659void irq_exit(void)
660{
661 __irq_exit_rcu();
662 ct_irq_exit();
663 /* must be last! */
664 lockdep_hardirq_exit();
665}
666
667/*
668 * This function must run with irqs disabled!
669 */
670inline void raise_softirq_irqoff(unsigned int nr)
671{
672 __raise_softirq_irqoff(nr);
673
674 /*
675 * If we're in an interrupt or softirq, we're done
676 * (this also catches softirq-disabled code). We will
677 * actually run the softirq once we return from
678 * the irq or softirq.
679 *
680 * Otherwise we wake up ksoftirqd to make sure we
681 * schedule the softirq soon.
682 */
683 if (!in_interrupt() && should_wake_ksoftirqd())
684 wakeup_softirqd();
685}
686
687void raise_softirq(unsigned int nr)
688{
689 unsigned long flags;
690
691 local_irq_save(flags);
692 raise_softirq_irqoff(nr);
693 local_irq_restore(flags);
694}
695
696void __raise_softirq_irqoff(unsigned int nr)
697{
698 lockdep_assert_irqs_disabled();
699 trace_softirq_raise(nr);
700 or_softirq_pending(1UL << nr);
701}
702
703void open_softirq(int nr, void (*action)(struct softirq_action *))
704{
705 softirq_vec[nr].action = action;
706}
707
708/*
709 * Tasklets
710 */
711struct tasklet_head {
712 struct tasklet_struct *head;
713 struct tasklet_struct **tail;
714};
715
716static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
717static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
718
719static void __tasklet_schedule_common(struct tasklet_struct *t,
720 struct tasklet_head __percpu *headp,
721 unsigned int softirq_nr)
722{
723 struct tasklet_head *head;
724 unsigned long flags;
725
726 local_irq_save(flags);
727 head = this_cpu_ptr(headp);
728 t->next = NULL;
729 *head->tail = t;
730 head->tail = &(t->next);
731 raise_softirq_irqoff(softirq_nr);
732 local_irq_restore(flags);
733}
734
735void __tasklet_schedule(struct tasklet_struct *t)
736{
737 __tasklet_schedule_common(t, &tasklet_vec,
738 TASKLET_SOFTIRQ);
739}
740EXPORT_SYMBOL(__tasklet_schedule);
741
742void __tasklet_hi_schedule(struct tasklet_struct *t)
743{
744 __tasklet_schedule_common(t, &tasklet_hi_vec,
745 HI_SOFTIRQ);
746}
747EXPORT_SYMBOL(__tasklet_hi_schedule);
748
749static bool tasklet_clear_sched(struct tasklet_struct *t)
750{
751 if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
752 wake_up_var(&t->state);
753 return true;
754 }
755
756 WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
757 t->use_callback ? "callback" : "func",
758 t->use_callback ? (void *)t->callback : (void *)t->func);
759
760 return false;
761}
762
763static void tasklet_action_common(struct softirq_action *a,
764 struct tasklet_head *tl_head,
765 unsigned int softirq_nr)
766{
767 struct tasklet_struct *list;
768
769 local_irq_disable();
770 list = tl_head->head;
771 tl_head->head = NULL;
772 tl_head->tail = &tl_head->head;
773 local_irq_enable();
774
775 while (list) {
776 struct tasklet_struct *t = list;
777
778 list = list->next;
779
780 if (tasklet_trylock(t)) {
781 if (!atomic_read(&t->count)) {
782 if (tasklet_clear_sched(t)) {
783 if (t->use_callback) {
784 trace_tasklet_entry(t, t->callback);
785 t->callback(t);
786 trace_tasklet_exit(t, t->callback);
787 } else {
788 trace_tasklet_entry(t, t->func);
789 t->func(t->data);
790 trace_tasklet_exit(t, t->func);
791 }
792 }
793 tasklet_unlock(t);
794 continue;
795 }
796 tasklet_unlock(t);
797 }
798
799 local_irq_disable();
800 t->next = NULL;
801 *tl_head->tail = t;
802 tl_head->tail = &t->next;
803 __raise_softirq_irqoff(softirq_nr);
804 local_irq_enable();
805 }
806}
807
808static __latent_entropy void tasklet_action(struct softirq_action *a)
809{
810 workqueue_softirq_action(false);
811 tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
812}
813
814static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
815{
816 workqueue_softirq_action(true);
817 tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
818}
819
820void tasklet_setup(struct tasklet_struct *t,
821 void (*callback)(struct tasklet_struct *))
822{
823 t->next = NULL;
824 t->state = 0;
825 atomic_set(&t->count, 0);
826 t->callback = callback;
827 t->use_callback = true;
828 t->data = 0;
829}
830EXPORT_SYMBOL(tasklet_setup);
831
832void tasklet_init(struct tasklet_struct *t,
833 void (*func)(unsigned long), unsigned long data)
834{
835 t->next = NULL;
836 t->state = 0;
837 atomic_set(&t->count, 0);
838 t->func = func;
839 t->use_callback = false;
840 t->data = data;
841}
842EXPORT_SYMBOL(tasklet_init);
843
844#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
845/*
846 * Do not use in new code. Waiting for tasklets from atomic contexts is
847 * error prone and should be avoided.
848 */
849void tasklet_unlock_spin_wait(struct tasklet_struct *t)
850{
851 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
852 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
853 /*
854 * Prevent a live lock when current preempted soft
855 * interrupt processing or prevents ksoftirqd from
856 * running. If the tasklet runs on a different CPU
857 * then this has no effect other than doing the BH
858 * disable/enable dance for nothing.
859 */
860 local_bh_disable();
861 local_bh_enable();
862 } else {
863 cpu_relax();
864 }
865 }
866}
867EXPORT_SYMBOL(tasklet_unlock_spin_wait);
868#endif
869
870void tasklet_kill(struct tasklet_struct *t)
871{
872 if (in_interrupt())
873 pr_notice("Attempt to kill tasklet from interrupt\n");
874
875 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
876 wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
877
878 tasklet_unlock_wait(t);
879 tasklet_clear_sched(t);
880}
881EXPORT_SYMBOL(tasklet_kill);
882
883#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
884void tasklet_unlock(struct tasklet_struct *t)
885{
886 smp_mb__before_atomic();
887 clear_bit(TASKLET_STATE_RUN, &t->state);
888 smp_mb__after_atomic();
889 wake_up_var(&t->state);
890}
891EXPORT_SYMBOL_GPL(tasklet_unlock);
892
893void tasklet_unlock_wait(struct tasklet_struct *t)
894{
895 wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
896}
897EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
898#endif
899
900void __init softirq_init(void)
901{
902 int cpu;
903
904 for_each_possible_cpu(cpu) {
905 per_cpu(tasklet_vec, cpu).tail =
906 &per_cpu(tasklet_vec, cpu).head;
907 per_cpu(tasklet_hi_vec, cpu).tail =
908 &per_cpu(tasklet_hi_vec, cpu).head;
909 }
910
911 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
912 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
913}
914
915static int ksoftirqd_should_run(unsigned int cpu)
916{
917 return local_softirq_pending();
918}
919
920static void run_ksoftirqd(unsigned int cpu)
921{
922 ksoftirqd_run_begin();
923 if (local_softirq_pending()) {
924 /*
925 * We can safely run softirq on inline stack, as we are not deep
926 * in the task stack here.
927 */
928 handle_softirqs(true);
929 ksoftirqd_run_end();
930 cond_resched();
931 return;
932 }
933 ksoftirqd_run_end();
934}
935
936#ifdef CONFIG_HOTPLUG_CPU
937static int takeover_tasklets(unsigned int cpu)
938{
939 workqueue_softirq_dead(cpu);
940
941 /* CPU is dead, so no lock needed. */
942 local_irq_disable();
943
944 /* Find end, append list for that CPU. */
945 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
946 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
947 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
948 per_cpu(tasklet_vec, cpu).head = NULL;
949 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
950 }
951 raise_softirq_irqoff(TASKLET_SOFTIRQ);
952
953 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
954 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
955 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
956 per_cpu(tasklet_hi_vec, cpu).head = NULL;
957 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
958 }
959 raise_softirq_irqoff(HI_SOFTIRQ);
960
961 local_irq_enable();
962 return 0;
963}
964#else
965#define takeover_tasklets NULL
966#endif /* CONFIG_HOTPLUG_CPU */
967
968static struct smp_hotplug_thread softirq_threads = {
969 .store = &ksoftirqd,
970 .thread_should_run = ksoftirqd_should_run,
971 .thread_fn = run_ksoftirqd,
972 .thread_comm = "ksoftirqd/%u",
973};
974
975static __init int spawn_ksoftirqd(void)
976{
977 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
978 takeover_tasklets);
979 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
980
981 return 0;
982}
983early_initcall(spawn_ksoftirqd);
984
985/*
986 * [ These __weak aliases are kept in a separate compilation unit, so that
987 * GCC does not inline them incorrectly. ]
988 */
989
990int __init __weak early_irq_init(void)
991{
992 return 0;
993}
994
995int __init __weak arch_probe_nr_irqs(void)
996{
997 return NR_IRQS_LEGACY;
998}
999
1000int __init __weak arch_early_irq_init(void)
1001{
1002 return 0;
1003}
1004
1005unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1006{
1007 return from;
1008}