v4.6
 
  1/*
  2 *	linux/kernel/softirq.c
  3 *
  4 *	Copyright (C) 1992 Linus Torvalds
  5 *
  6 *	Distribute under GPLv2.
  7 *
  8 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/export.h>
 14#include <linux/kernel_stat.h>
 15#include <linux/interrupt.h>
 16#include <linux/init.h>
 17#include <linux/mm.h>
 18#include <linux/notifier.h>
 19#include <linux/percpu.h>
 20#include <linux/cpu.h>
 21#include <linux/freezer.h>
 22#include <linux/kthread.h>
 23#include <linux/rcupdate.h>
 24#include <linux/ftrace.h>
 25#include <linux/smp.h>
 26#include <linux/smpboot.h>
 27#include <linux/tick.h>
 28#include <linux/irq.h>
 29
 30#define CREATE_TRACE_POINTS
 31#include <trace/events/irq.h>
 32
 33/*
 34   - No shared variables, all the data are CPU local.
 35   - If a softirq needs serialization, let it serialize itself
 36     by its own spinlocks.
 37   - Even if softirq is serialized, only local cpu is marked for
 38     execution. Hence, we get something sort of weak cpu binding.
 39     Though it is still not clear whether it will result in better
 40     locality or not.
 41
 42   Examples:
 43   - NET RX softirq. It is multithreaded and does not require
 44     any global serialization.
 45   - NET TX softirq. It kicks software netdevice queues, hence
 46     it is logically serialized per device, but this serialization
 47     is invisible to common code.
 48   - Tasklets: serialized wrt itself.
 49 */
 50
 51#ifndef __ARCH_IRQ_STAT
 52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 53EXPORT_SYMBOL(irq_stat);
 54#endif
 55
 56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 57
 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 59
 60const char * const softirq_to_name[NR_SOFTIRQS] = {
 61	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 62	"TASKLET", "SCHED", "HRTIMER", "RCU"
 63};
 64
 65/*
 66 * we cannot loop indefinitely here to avoid userspace starvation,
 67 * but we also don't want to introduce a worst case 1/HZ latency
 68 * to the pending events, so let the scheduler balance
 69 * the softirq load for us.
 70 */
 71static void wakeup_softirqd(void)
 72{
 73	/* Interrupts are disabled: no need to stop preemption */
 74	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 75
 76	if (tsk && tsk->state != TASK_RUNNING)
 77		wake_up_process(tsk);
 78}
 79
 80/*
 81 * preempt_count and SOFTIRQ_OFFSET usage:
 82 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 83 *   softirq processing.
 84 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 85 *   on local_bh_disable or local_bh_enable.
 
 86 * This lets us distinguish between whether we are currently processing
 87 * softirq and whether we just have bh disabled.
 88 */
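/*
 * Illustrative sketch, not part of softirq.c: how the two offsets above are
 * observed from ordinary kernel code (assuming <linux/interrupt.h>,
 * <linux/preempt.h> and <linux/spinlock.h>). The example_* names are
 * hypothetical. in_softirq() is true whenever softirq_count() is non-zero,
 * i.e. while bottom halves are merely disabled *or* a softirq runs, whereas
 * in_serving_softirq() is only true while a handler is actually executing.
 */
static DEFINE_SPINLOCK(example_lock);

static void example_touch_shared_data(void)
{
	local_bh_disable();			/* count += SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!in_softirq());			/* bh disabled counts as softirq context */
	WARN_ON(in_serving_softirq());		/* but no handler is running here */

	spin_lock(&example_lock);		/* safe against softirq users on this CPU */
	/* ... modify data shared with a softirq handler ... */
	spin_unlock(&example_lock);

	local_bh_enable();			/* count -= SOFTIRQ_DISABLE_OFFSET */
}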
 
 89
 90/*
 91 * This one is for softirq.c-internal use,
 92 * where hardirqs are disabled legitimately:
 93 */
 94#ifdef CONFIG_TRACE_IRQFLAGS
 95void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 96{
 97	unsigned long flags;
 98
 99	WARN_ON_ONCE(in_irq());
100
101	raw_local_irq_save(flags);
102	/*
103	 * The preempt tracer hooks into preempt_count_add and will break
104	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
105	 * is set and before current->softirq_enabled is cleared.
106	 * We must manually increment preempt_count here and manually
107	 * call the trace_preempt_off later.
108	 */
109	__preempt_count_add(cnt);
110	/*
111	 * Were softirqs turned off above:
112	 */
113	if (softirq_count() == (cnt & SOFTIRQ_MASK))
114		trace_softirqs_off(ip);
115	raw_local_irq_restore(flags);
116
117	if (preempt_count() == cnt) {
118#ifdef CONFIG_DEBUG_PREEMPT
119		current->preempt_disable_ip = get_lock_parent_ip();
120#endif
121		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
122	}
123}
124EXPORT_SYMBOL(__local_bh_disable_ip);
125#endif /* CONFIG_TRACE_IRQFLAGS */
126
127static void __local_bh_enable(unsigned int cnt)
128{
129	WARN_ON_ONCE(!irqs_disabled());
130
131	if (softirq_count() == (cnt & SOFTIRQ_MASK))
132		trace_softirqs_on(_RET_IP_);
133	preempt_count_sub(cnt);
 
134}
135
136/*
137 * Special-case - softirqs can safely be enabled in
138 * cond_resched_softirq(), or by __do_softirq(),
139 * without processing still-pending softirqs:
140 */
141void _local_bh_enable(void)
142{
143	WARN_ON_ONCE(in_irq());
144	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
145}
146EXPORT_SYMBOL(_local_bh_enable);
147
148void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
149{
150	WARN_ON_ONCE(in_irq() || irqs_disabled());
 
151#ifdef CONFIG_TRACE_IRQFLAGS
152	local_irq_disable();
153#endif
154	/*
155	 * Are softirqs going to be turned on now:
156	 */
157	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
158		trace_softirqs_on(ip);
159	/*
160	 * Keep preemption disabled until we are done with
161	 * softirq processing:
162	 */
163	preempt_count_sub(cnt - 1);
164
165	if (unlikely(!in_interrupt() && local_softirq_pending())) {
166		/*
167		 * Run softirq if any pending. And do it in its own stack
168		 * as we may be calling this deep in a task call stack already.
169		 */
170		do_softirq();
171	}
172
173	preempt_count_dec();
174#ifdef CONFIG_TRACE_IRQFLAGS
175	local_irq_enable();
176#endif
177	preempt_check_resched();
178}
179EXPORT_SYMBOL(__local_bh_enable_ip);
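/*
 * Illustrative sketch, not part of softirq.c: work raised while bottom
 * halves are disabled stays pending and is processed by the matching
 * local_bh_enable(), which ends up in do_softirq() as shown above. Uses the
 * classic func/data tasklet API of this kernel; the example_* names are
 * hypothetical.
 */
static void example_tasklet_fn(unsigned long data)
{
	/* runs in TASKLET_SOFTIRQ (softirq) context */
}
static DECLARE_TASKLET(example_tasklet, example_tasklet_fn, 0);

static void example_defer_until_bh_enable(void)
{
	local_bh_disable();
	tasklet_schedule(&example_tasklet);	/* marks TASKLET_SOFTIRQ pending */
	/* in_interrupt() is true here, so the handler cannot run yet */
	local_bh_enable();			/* pending softirqs are run here */
}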
180
181/*
182 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
183 * but break the loop if need_resched() is set or after 2 ms.
184 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
185 * certain cases, such as stop_machine(), jiffies may cease to
186 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
187 * well to make sure we eventually return from this method.
188 *
189 * These limits have been established via experimentation.
190 * The two things to balance are latency and fairness -
191 * we want to handle softirqs as soon as possible, but they
192 * should not be able to lock up the box.
193 */
194#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
195#define MAX_SOFTIRQ_RESTART 10
196
197#ifdef CONFIG_TRACE_IRQFLAGS
198/*
199 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
200 * to keep the lockdep irq context tracking as tight as possible in order to
201 * not mis-qualify lock contexts and miss possible deadlocks.
202 */
203
204static inline bool lockdep_softirq_start(void)
205{
206	bool in_hardirq = false;
207
208	if (trace_hardirq_context(current)) {
209		in_hardirq = true;
210		trace_hardirq_exit();
211	}
212
213	lockdep_softirq_enter();
214
215	return in_hardirq;
216}
217
218static inline void lockdep_softirq_end(bool in_hardirq)
219{
220	lockdep_softirq_exit();
221
222	if (in_hardirq)
223		trace_hardirq_enter();
224}
225#else
226static inline bool lockdep_softirq_start(void) { return false; }
227static inline void lockdep_softirq_end(bool in_hardirq) { }
228#endif
229
230asmlinkage __visible void __softirq_entry __do_softirq(void)
231{
232	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
233	unsigned long old_flags = current->flags;
234	int max_restart = MAX_SOFTIRQ_RESTART;
235	struct softirq_action *h;
236	bool in_hardirq;
237	__u32 pending;
238	int softirq_bit;
239
240	/*
241	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
242	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
243	 * again if the socket is related to swapping.
244	 */
245	current->flags &= ~PF_MEMALLOC;
246
247	pending = local_softirq_pending();
248	account_irq_enter_time(current);
249
250	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
251	in_hardirq = lockdep_softirq_start();
 
252
253restart:
254	/* Reset the pending bitmask before enabling irqs */
255	set_softirq_pending(0);
256
257	local_irq_enable();
258
259	h = softirq_vec;
260
261	while ((softirq_bit = ffs(pending))) {
262		unsigned int vec_nr;
263		int prev_count;
264
265		h += softirq_bit - 1;
266
267		vec_nr = h - softirq_vec;
268		prev_count = preempt_count();
269
270		kstat_incr_softirqs_this_cpu(vec_nr);
271
272		trace_softirq_entry(vec_nr);
273		h->action(h);
274		trace_softirq_exit(vec_nr);
275		if (unlikely(prev_count != preempt_count())) {
276			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
277			       vec_nr, softirq_to_name[vec_nr], h->action,
278			       prev_count, preempt_count());
279			preempt_count_set(prev_count);
280		}
281		h++;
282		pending >>= softirq_bit;
283	}
284
285	rcu_bh_qs();
286	local_irq_disable();
287
288	pending = local_softirq_pending();
289	if (pending) {
290		if (time_before(jiffies, end) && !need_resched() &&
291		    --max_restart)
292			goto restart;
293
294		wakeup_softirqd();
295	}
296
 
297	lockdep_softirq_end(in_hardirq);
298	account_irq_exit_time(current);
299	__local_bh_enable(SOFTIRQ_OFFSET);
300	WARN_ON_ONCE(in_interrupt());
301	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
302}
303
304asmlinkage __visible void do_softirq(void)
305{
306	__u32 pending;
307	unsigned long flags;
308
309	if (in_interrupt())
310		return;
311
312	local_irq_save(flags);
313
314	pending = local_softirq_pending();
315
316	if (pending)
317		do_softirq_own_stack();
318
319	local_irq_restore(flags);
320}
321
322/*
323 * Enter an interrupt context.
324 */
325void irq_enter(void)
326{
327	rcu_irq_enter();
328	if (is_idle_task(current) && !in_interrupt()) {
329		/*
330		 * Prevent raise_softirq from needlessly waking up ksoftirqd
331		 * here, as softirq will be serviced on return from interrupt.
332		 */
333		local_bh_disable();
334		tick_irq_enter();
335		_local_bh_enable();
336	}
337
338	__irq_enter();
339}
340
341static inline void invoke_softirq(void)
342{
343	if (!force_irqthreads) {
344#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
345		/*
346		 * We can safely execute softirq on the current stack if
347		 * it is the irq stack, because it should be near empty
348		 * at this stage.
349		 */
350		__do_softirq();
351#else
352		/*
353		 * Otherwise, irq_exit() is called on the task stack that can
354		 * be potentially deep already. So call softirq in its own stack
355		 * to prevent any overrun.
356		 */
357		do_softirq_own_stack();
358#endif
359	} else {
360		wakeup_softirqd();
361	}
362}
363
364static inline void tick_irq_exit(void)
365{
366#ifdef CONFIG_NO_HZ_COMMON
367	int cpu = smp_processor_id();
368
369	/* Make sure that timer wheel updates are propagated */
370	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
371		if (!in_interrupt())
372			tick_nohz_irq_exit();
373	}
374#endif
375}
376
377/*
378 * Exit an interrupt context. Process softirqs if needed and possible:
379 */
380void irq_exit(void)
381{
382#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
383	local_irq_disable();
384#else
385	WARN_ON_ONCE(!irqs_disabled());
386#endif
387
388	account_irq_exit_time(current);
389	preempt_count_sub(HARDIRQ_OFFSET);
390	if (!in_interrupt() && local_softirq_pending())
391		invoke_softirq();
392
393	tick_irq_exit();
394	rcu_irq_exit();
395	trace_hardirq_exit(); /* must be last! */
396}
397
398/*
399 * This function must run with irqs disabled!
400 */
401inline void raise_softirq_irqoff(unsigned int nr)
402{
403	__raise_softirq_irqoff(nr);
404
405	/*
406	 * If we're in an interrupt or softirq, we're done
407	 * (this also catches softirq-disabled code). We will
408	 * actually run the softirq once we return from
409	 * the irq or softirq.
410	 *
411	 * Otherwise we wake up ksoftirqd to make sure we
412	 * schedule the softirq soon.
413	 */
414	if (!in_interrupt())
415		wakeup_softirqd();
416}
417
418void raise_softirq(unsigned int nr)
419{
420	unsigned long flags;
421
422	local_irq_save(flags);
423	raise_softirq_irqoff(nr);
424	local_irq_restore(flags);
425}
426
427void __raise_softirq_irqoff(unsigned int nr)
428{
 
429	trace_softirq_raise(nr);
430	or_softirq_pending(1UL << nr);
431}
432
433void open_softirq(int nr, void (*action)(struct softirq_action *))
434{
435	softirq_vec[nr].action = action;
436}
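/*
 * Illustrative sketch, not part of softirq.c: how a subsystem wires up one
 * of the fixed softirq vectors enumerated in <linux/interrupt.h>. There is
 * no dynamic allocation of vectors; EXAMPLE_SOFTIRQ and the example_*
 * functions are hypothetical, but real users such as the block layer
 * (BLOCK_SOFTIRQ) follow the same shape.
 */
static void example_softirq_handler(struct softirq_action *h)
{
	/* runs with hardirqs enabled, in softirq context, on the raising CPU */
}

static int __init example_subsys_init(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_handler);
	return 0;
}
early_initcall(example_subsys_init);

/* Typically called from a hardirq handler once work has been queued: */
static void example_kick(void)
{
	raise_softirq(EXAMPLE_SOFTIRQ);	/* or raise_softirq_irqoff() with irqs off */
}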
437
438/*
439 * Tasklets
440 */
441struct tasklet_head {
442	struct tasklet_struct *head;
443	struct tasklet_struct **tail;
444};
445
446static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
447static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
448
449void __tasklet_schedule(struct tasklet_struct *t)
450{
451	unsigned long flags;
452
453	local_irq_save(flags);
454	t->next = NULL;
455	*__this_cpu_read(tasklet_vec.tail) = t;
456	__this_cpu_write(tasklet_vec.tail, &(t->next));
457	raise_softirq_irqoff(TASKLET_SOFTIRQ);
458	local_irq_restore(flags);
459}
460EXPORT_SYMBOL(__tasklet_schedule);
461
462void __tasklet_hi_schedule(struct tasklet_struct *t)
463{
464	unsigned long flags;
465
466	local_irq_save(flags);
467	t->next = NULL;
468	*__this_cpu_read(tasklet_hi_vec.tail) = t;
469	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
470	raise_softirq_irqoff(HI_SOFTIRQ);
471	local_irq_restore(flags);
472}
473EXPORT_SYMBOL(__tasklet_hi_schedule);
474
475void __tasklet_hi_schedule_first(struct tasklet_struct *t)
476{
477	BUG_ON(!irqs_disabled());
478
479	t->next = __this_cpu_read(tasklet_hi_vec.head);
480	__this_cpu_write(tasklet_hi_vec.head, t);
481	__raise_softirq_irqoff(HI_SOFTIRQ);
482}
483EXPORT_SYMBOL(__tasklet_hi_schedule_first);
484
485static void tasklet_action(struct softirq_action *a)
486{
487	struct tasklet_struct *list;
488
489	local_irq_disable();
490	list = __this_cpu_read(tasklet_vec.head);
491	__this_cpu_write(tasklet_vec.head, NULL);
492	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
493	local_irq_enable();
494
495	while (list) {
496		struct tasklet_struct *t = list;
497
498		list = list->next;
499
500		if (tasklet_trylock(t)) {
501			if (!atomic_read(&t->count)) {
502				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
503							&t->state))
504					BUG();
505				t->func(t->data);
506				tasklet_unlock(t);
507				continue;
508			}
509			tasklet_unlock(t);
510		}
511
512		local_irq_disable();
513		t->next = NULL;
514		*__this_cpu_read(tasklet_vec.tail) = t;
515		__this_cpu_write(tasklet_vec.tail, &(t->next));
516		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
517		local_irq_enable();
518	}
519}
520
521static void tasklet_hi_action(struct softirq_action *a)
522{
523	struct tasklet_struct *list;
524
525	local_irq_disable();
526	list = __this_cpu_read(tasklet_hi_vec.head);
527	__this_cpu_write(tasklet_hi_vec.head, NULL);
528	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
529	local_irq_enable();
530
531	while (list) {
532		struct tasklet_struct *t = list;
533
534		list = list->next;
535
536		if (tasklet_trylock(t)) {
537			if (!atomic_read(&t->count)) {
538				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
539							&t->state))
540					BUG();
541				t->func(t->data);
542				tasklet_unlock(t);
543				continue;
544			}
545			tasklet_unlock(t);
546		}
547
548		local_irq_disable();
549		t->next = NULL;
550		*__this_cpu_read(tasklet_hi_vec.tail) = t;
551		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
552		__raise_softirq_irqoff(HI_SOFTIRQ);
553		local_irq_enable();
554	}
555}
556
557void tasklet_init(struct tasklet_struct *t,
558		  void (*func)(unsigned long), unsigned long data)
559{
560	t->next = NULL;
561	t->state = 0;
562	atomic_set(&t->count, 0);
563	t->func = func;
 
564	t->data = data;
565}
566EXPORT_SYMBOL(tasklet_init);
567
568void tasklet_kill(struct tasklet_struct *t)
569{
570	if (in_interrupt())
571		pr_notice("Attempt to kill tasklet from interrupt\n");
572
573	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
574		do {
575			yield();
576		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
577	}
578	tasklet_unlock_wait(t);
579	clear_bit(TASKLET_STATE_SCHED, &t->state);
580}
581EXPORT_SYMBOL(tasklet_kill);
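/*
 * Illustrative sketch, not part of softirq.c: the typical lifecycle of a
 * driver tasklet with the func/data API used in this kernel. All example_*
 * names are hypothetical.
 */
struct example_dev {
	struct tasklet_struct	rx_tasklet;
	/* ... device state ... */
};

static void example_rx_work(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	/* drain the receive ring of @dev in softirq context */
}

static void example_setup(struct example_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, example_rx_work, (unsigned long)dev);
}

static irqreturn_t example_interrupt(int irq, void *cookie)
{
	struct example_dev *dev = cookie;

	tasklet_schedule(&dev->rx_tasklet);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}

static void example_teardown(struct example_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);		/* may sleep, not for interrupt context */
}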
582
583/*
584 * tasklet_hrtimer
585 */
586
587/*
588 * The trampoline is called when the hrtimer expires. It schedules a tasklet
589 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
590 * hrtimer callback, but from softirq context.
591 */
592static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
593{
594	struct tasklet_hrtimer *ttimer =
595		container_of(timer, struct tasklet_hrtimer, timer);
596
597	tasklet_hi_schedule(&ttimer->tasklet);
598	return HRTIMER_NORESTART;
599}
 
600
601/*
602 * Helper function which calls the hrtimer callback from
603 * tasklet/softirq context
604 */
605static void __tasklet_hrtimer_trampoline(unsigned long data)
606{
607	struct tasklet_hrtimer *ttimer = (void *)data;
608	enum hrtimer_restart restart;
609
610	restart = ttimer->function(&ttimer->timer);
611	if (restart != HRTIMER_NORESTART)
612		hrtimer_restart(&ttimer->timer);
613}
614
615/**
616 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
617 * @ttimer:	 tasklet_hrtimer which is initialized
618 * @function:	 hrtimer callback function which gets called from softirq context
619 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
620 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
621 */
622void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
623			  enum hrtimer_restart (*function)(struct hrtimer *),
624			  clockid_t which_clock, enum hrtimer_mode mode)
625{
626	hrtimer_init(&ttimer->timer, which_clock, mode);
627	ttimer->timer.function = __hrtimer_tasklet_trampoline;
628	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
629		     (unsigned long)ttimer);
630	ttimer->function = function;
631}
632EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
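/*
 * Illustrative sketch, not part of softirq.c: arming a tasklet_hrtimer so
 * the timer function runs from HI_SOFTIRQ context rather than in hard
 * interrupt context. tasklet_hrtimer_start() and ms_to_ktime() come from
 * <linux/interrupt.h> and <linux/ktime.h>; the example_* names are
 * hypothetical.
 */
static struct tasklet_hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* invoked via __tasklet_hrtimer_trampoline(), i.e. in softirq context */
	return HRTIMER_NORESTART;
}

static void example_arm_timer(void)
{
	tasklet_hrtimer_init(&example_timer, example_timer_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&example_timer, ms_to_ktime(10),
			      HRTIMER_MODE_REL);
}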
633
634void __init softirq_init(void)
635{
636	int cpu;
637
638	for_each_possible_cpu(cpu) {
639		per_cpu(tasklet_vec, cpu).tail =
640			&per_cpu(tasklet_vec, cpu).head;
641		per_cpu(tasklet_hi_vec, cpu).tail =
642			&per_cpu(tasklet_hi_vec, cpu).head;
643	}
644
645	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
646	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
647}
648
649static int ksoftirqd_should_run(unsigned int cpu)
650{
651	return local_softirq_pending();
652}
653
654static void run_ksoftirqd(unsigned int cpu)
655{
656	local_irq_disable();
657	if (local_softirq_pending()) {
658		/*
659		 * We can safely run softirq on inline stack, as we are not deep
660		 * in the task stack here.
661		 */
662		__do_softirq();
663		local_irq_enable();
664		cond_resched_rcu_qs();
665		return;
666	}
667	local_irq_enable();
668}
669
670#ifdef CONFIG_HOTPLUG_CPU
671/*
672 * tasklet_kill_immediate is called to remove a tasklet which can already be
673 * scheduled for execution on @cpu.
674 *
675 * Unlike tasklet_kill, this function removes the tasklet
676 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
677 *
678 * When this function is called, @cpu must be in the CPU_DEAD state.
679 */
680void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
681{
682	struct tasklet_struct **i;
683
684	BUG_ON(cpu_online(cpu));
685	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
686
687	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
688		return;
689
690	/* CPU is dead, so no lock needed. */
691	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
692		if (*i == t) {
693			*i = t->next;
694			/* If this was the tail element, move the tail ptr */
695			if (*i == NULL)
696				per_cpu(tasklet_vec, cpu).tail = i;
697			return;
698		}
699	}
700	BUG();
701}
702
703static void takeover_tasklets(unsigned int cpu)
704{
705	/* CPU is dead, so no lock needed. */
706	local_irq_disable();
707
708	/* Find end, append list for that CPU. */
709	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
710		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
711		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
712		per_cpu(tasklet_vec, cpu).head = NULL;
713		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
714	}
715	raise_softirq_irqoff(TASKLET_SOFTIRQ);
716
717	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
718		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
719		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
720		per_cpu(tasklet_hi_vec, cpu).head = NULL;
721		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
722	}
723	raise_softirq_irqoff(HI_SOFTIRQ);
724
725	local_irq_enable();
726}
727#endif /* CONFIG_HOTPLUG_CPU */
728
729static int cpu_callback(struct notifier_block *nfb, unsigned long action,
730			void *hcpu)
731{
732	switch (action) {
733#ifdef CONFIG_HOTPLUG_CPU
734	case CPU_DEAD:
735	case CPU_DEAD_FROZEN:
736		takeover_tasklets((unsigned long)hcpu);
737		break;
738#endif /* CONFIG_HOTPLUG_CPU */
739	}
740	return NOTIFY_OK;
741}
742
743static struct notifier_block cpu_nfb = {
744	.notifier_call = cpu_callback
745};
746
747static struct smp_hotplug_thread softirq_threads = {
748	.store			= &ksoftirqd,
749	.thread_should_run	= ksoftirqd_should_run,
750	.thread_fn		= run_ksoftirqd,
751	.thread_comm		= "ksoftirqd/%u",
752};
753
754static __init int spawn_ksoftirqd(void)
755{
756	register_cpu_notifier(&cpu_nfb);
757
758	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
759
760	return 0;
761}
762early_initcall(spawn_ksoftirqd);
763
764/*
765 * [ These __weak aliases are kept in a separate compilation unit, so that
766 *   GCC does not inline them incorrectly. ]
767 */
768
769int __init __weak early_irq_init(void)
770{
771	return 0;
772}
773
774int __init __weak arch_probe_nr_irqs(void)
775{
776	return NR_IRQS_LEGACY;
777}
778
779int __init __weak arch_early_irq_init(void)
780{
781	return 0;
782}
783
784unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
785{
786	return from;
787}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/kernel/softirq.c
   4 *
   5 *	Copyright (C) 1992 Linus Torvalds
    6 *
    7 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/export.h>
  13#include <linux/kernel_stat.h>
  14#include <linux/interrupt.h>
  15#include <linux/init.h>
  16#include <linux/local_lock.h>
  17#include <linux/mm.h>
  18#include <linux/notifier.h>
  19#include <linux/percpu.h>
  20#include <linux/cpu.h>
  21#include <linux/freezer.h>
  22#include <linux/kthread.h>
  23#include <linux/rcupdate.h>
  24#include <linux/ftrace.h>
  25#include <linux/smp.h>
  26#include <linux/smpboot.h>
  27#include <linux/tick.h>
  28#include <linux/irq.h>
  29#include <linux/wait_bit.h>
  30
  31#include <asm/softirq_stack.h>
  32
  33#define CREATE_TRACE_POINTS
  34#include <trace/events/irq.h>
  35
  36/*
  37   - No shared variables, all the data are CPU local.
  38   - If a softirq needs serialization, let it serialize itself
  39     by its own spinlocks.
  40   - Even if softirq is serialized, only local cpu is marked for
  41     execution. Hence, we get something sort of weak cpu binding.
  42     Though it is still not clear whether it will result in better
  43     locality or not.
  44
  45   Examples:
  46   - NET RX softirq. It is multithreaded and does not require
  47     any global serialization.
  48   - NET TX softirq. It kicks software netdevice queues, hence
  49     it is logically serialized per device, but this serialization
  50     is invisible to common code.
  51   - Tasklets: serialized wrt itself.
  52 */
  53
  54#ifndef __ARCH_IRQ_STAT
  55DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
  56EXPORT_PER_CPU_SYMBOL(irq_stat);
  57#endif
  58
  59static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
  60
  61DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
  62
  63const char * const softirq_to_name[NR_SOFTIRQS] = {
  64	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
  65	"TASKLET", "SCHED", "HRTIMER", "RCU"
  66};
  67
  68/*
  69 * we cannot loop indefinitely here to avoid userspace starvation,
  70 * but we also don't want to introduce a worst case 1/HZ latency
  71 * to the pending events, so let the scheduler balance
  72 * the softirq load for us.
  73 */
  74static void wakeup_softirqd(void)
  75{
  76	/* Interrupts are disabled: no need to stop preemption */
  77	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
  78
  79	if (tsk)
  80		wake_up_process(tsk);
  81}
  82
  83/*
  84 * If ksoftirqd is scheduled, we do not want to process pending softirqs
  85 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
  86 * unless we're doing some of the synchronous softirqs.
  87 */
  88#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
  89static bool ksoftirqd_running(unsigned long pending)
  90{
  91	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
  92
  93	if (pending & SOFTIRQ_NOW_MASK)
  94		return false;
  95	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
  96}
  97
  98#ifdef CONFIG_TRACE_IRQFLAGS
  99DEFINE_PER_CPU(int, hardirqs_enabled);
 100DEFINE_PER_CPU(int, hardirq_context);
 101EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
 102EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
 103#endif
 104
 105/*
 106 * SOFTIRQ_OFFSET usage:
 107 *
 108 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 109 * to a per CPU counter and to task::softirqs_disabled_cnt.
 110 *
 111 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 112 *   processing.
 113 *
 114 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 115 *   on local_bh_disable or local_bh_enable.
 116 *
 117 * This lets us distinguish between whether we are currently processing
 118 * softirq and whether we just have bh disabled.
 119 */
 120#ifdef CONFIG_PREEMPT_RT
 121
 122/*
 123 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
 124 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 125 * softirq disabled section to be preempted.
 126 *
 127 * The per task counter is used for softirq_count(), in_softirq() and
 128 * in_serving_softirqs() because these counts are only valid when the task
 129 * holding softirq_ctrl::lock is running.
 130 *
 131 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 132 * the task which is in a softirq disabled section is preempted or blocks.
 133 */
 134struct softirq_ctrl {
 135	local_lock_t	lock;
 136	int		cnt;
 137};
 138
 139static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
 140	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
 141};
 142
 143/**
 144 * local_bh_blocked() - Check for idle whether BH processing is blocked
 145 *
 146 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
 147 *
 148 * This is invoked from the idle task to guard against false positive
 149 * softirq pending warnings, which would happen when the task which holds
 150 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 151 * some other lock.
 152 */
 153bool local_bh_blocked(void)
 154{
 155	return __this_cpu_read(softirq_ctrl.cnt) != 0;
 156}
 157
 158void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 159{
 160	unsigned long flags;
 161	int newcnt;
 162
 163	WARN_ON_ONCE(in_hardirq());
 164
 165	/* First entry of a task into a BH disabled section? */
 166	if (!current->softirq_disable_cnt) {
 167		if (preemptible()) {
 168			local_lock(&softirq_ctrl.lock);
 169			/* Required to meet the RCU bottomhalf requirements. */
 170			rcu_read_lock();
 171		} else {
 172			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
 173		}
 174	}
 175
 176	/*
 177	 * Track the per CPU softirq disabled state. On RT this is per CPU
 178	 * state to allow preemption of bottom half disabled sections.
 179	 */
 180	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
 181	/*
 182	 * Reflect the result in the task state to prevent recursion on the
 183	 * local lock and to make softirq_count() & al work.
 184	 */
 185	current->softirq_disable_cnt = newcnt;
 186
 187	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
 188		raw_local_irq_save(flags);
 189		lockdep_softirqs_off(ip);
 190		raw_local_irq_restore(flags);
 191	}
 192}
 193EXPORT_SYMBOL(__local_bh_disable_ip);
 194
 195static void __local_bh_enable(unsigned int cnt, bool unlock)
 196{
 197	unsigned long flags;
 198	int newcnt;
 199
 200	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
 201			    this_cpu_read(softirq_ctrl.cnt));
 202
 203	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
 204		raw_local_irq_save(flags);
 205		lockdep_softirqs_on(_RET_IP_);
 206		raw_local_irq_restore(flags);
 207	}
 208
 209	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
 210	current->softirq_disable_cnt = newcnt;
 211
 212	if (!newcnt && unlock) {
 213		rcu_read_unlock();
 214		local_unlock(&softirq_ctrl.lock);
 215	}
 216}
 217
 218void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 219{
 220	bool preempt_on = preemptible();
 221	unsigned long flags;
 222	u32 pending;
 223	int curcnt;
 224
 225	WARN_ON_ONCE(in_hardirq());
 226	lockdep_assert_irqs_enabled();
 227
 228	local_irq_save(flags);
 229	curcnt = __this_cpu_read(softirq_ctrl.cnt);
 230
 231	/*
 232	 * If this is not reenabling soft interrupts, no point in trying to
 233	 * run pending ones.
 234	 */
 235	if (curcnt != cnt)
 236		goto out;
 237
 238	pending = local_softirq_pending();
 239	if (!pending || ksoftirqd_running(pending))
 240		goto out;
 241
 242	/*
 243	 * If this was called from non preemptible context, wake up the
 244	 * softirq daemon.
 245	 */
 246	if (!preempt_on) {
 247		wakeup_softirqd();
 248		goto out;
 249	}
 250
 251	/*
 252	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
 253	 * in_serving_softirq() become true.
 254	 */
 255	cnt = SOFTIRQ_OFFSET;
 256	__local_bh_enable(cnt, false);
 257	__do_softirq();
 258
 259out:
 260	__local_bh_enable(cnt, preempt_on);
 261	local_irq_restore(flags);
 262}
 263EXPORT_SYMBOL(__local_bh_enable_ip);
 264
 265/*
 266 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 267 * to acquire the per CPU local lock for reentrancy protection.
 268 */
 269static inline void ksoftirqd_run_begin(void)
 270{
 271	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 272	local_irq_disable();
 273}
 274
 275/* Counterpart to ksoftirqd_run_begin() */
 276static inline void ksoftirqd_run_end(void)
 277{
 278	__local_bh_enable(SOFTIRQ_OFFSET, true);
 279	WARN_ON_ONCE(in_interrupt());
 280	local_irq_enable();
 281}
 282
 283static inline void softirq_handle_begin(void) { }
 284static inline void softirq_handle_end(void) { }
 285
 286static inline bool should_wake_ksoftirqd(void)
 287{
 288	return !this_cpu_read(softirq_ctrl.cnt);
 289}
 290
 291static inline void invoke_softirq(void)
 292{
 293	if (should_wake_ksoftirqd())
 294		wakeup_softirqd();
 295}
 296
 297/*
 298 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 299 * call. On RT kernels this is undesired and the only known functionality
 300 * in the block layer which does this is disabled on RT. If soft interrupts
 301 * get raised which haven't been raised before the flush, warn so it can be
 302 * investigated.
 303 */
 304void do_softirq_post_smp_call_flush(unsigned int was_pending)
 305{
 306	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
 307		invoke_softirq();
 308}
 309
 310#else /* CONFIG_PREEMPT_RT */
 311
 312/*
 313 * This one is for softirq.c-internal use, where hardirqs are disabled
 314 * legitimately:
 315 */
 316#ifdef CONFIG_TRACE_IRQFLAGS
 317void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 318{
 319	unsigned long flags;
 320
 321	WARN_ON_ONCE(in_hardirq());
 322
 323	raw_local_irq_save(flags);
 324	/*
 325	 * The preempt tracer hooks into preempt_count_add and will break
 326	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
 327	 * is set and before current->softirq_enabled is cleared.
 328	 * We must manually increment preempt_count here and manually
 329	 * call the trace_preempt_off later.
 330	 */
 331	__preempt_count_add(cnt);
 332	/*
 333	 * Were softirqs turned off above:
 334	 */
 335	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 336		lockdep_softirqs_off(ip);
 337	raw_local_irq_restore(flags);
 338
 339	if (preempt_count() == cnt) {
 340#ifdef CONFIG_DEBUG_PREEMPT
 341		current->preempt_disable_ip = get_lock_parent_ip();
 342#endif
 343		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 344	}
 345}
 346EXPORT_SYMBOL(__local_bh_disable_ip);
 347#endif /* CONFIG_TRACE_IRQFLAGS */
 348
 349static void __local_bh_enable(unsigned int cnt)
 350{
 351	lockdep_assert_irqs_disabled();
 352
 353	if (preempt_count() == cnt)
 354		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 355
 356	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 357		lockdep_softirqs_on(_RET_IP_);
 358
 359	__preempt_count_sub(cnt);
 360}
 361
 362/*
 363 * Special-case - softirqs can safely be enabled by __do_softirq(),
 
 364 * without processing still-pending softirqs:
 365 */
 366void _local_bh_enable(void)
 367{
 368	WARN_ON_ONCE(in_hardirq());
 369	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 370}
 371EXPORT_SYMBOL(_local_bh_enable);
 372
 373void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 374{
 375	WARN_ON_ONCE(in_hardirq());
 376	lockdep_assert_irqs_enabled();
 377#ifdef CONFIG_TRACE_IRQFLAGS
 378	local_irq_disable();
 379#endif
 380	/*
 381	 * Are softirqs going to be turned on now:
 382	 */
 383	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
 384		lockdep_softirqs_on(ip);
 385	/*
 386	 * Keep preemption disabled until we are done with
 387	 * softirq processing:
 388	 */
 389	__preempt_count_sub(cnt - 1);
 390
 391	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 392		/*
 393		 * Run softirq if any pending. And do it in its own stack
 394		 * as we may be calling this deep in a task call stack already.
 395		 */
 396		do_softirq();
 397	}
 398
 399	preempt_count_dec();
 400#ifdef CONFIG_TRACE_IRQFLAGS
 401	local_irq_enable();
 402#endif
 403	preempt_check_resched();
 404}
 405EXPORT_SYMBOL(__local_bh_enable_ip);
 406
 407static inline void softirq_handle_begin(void)
 408{
 409	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 410}
 411
 412static inline void softirq_handle_end(void)
 413{
 414	__local_bh_enable(SOFTIRQ_OFFSET);
 415	WARN_ON_ONCE(in_interrupt());
 416}
 417
 418static inline void ksoftirqd_run_begin(void)
 419{
 420	local_irq_disable();
 421}
 422
 423static inline void ksoftirqd_run_end(void)
 424{
 425	local_irq_enable();
 426}
 427
 428static inline bool should_wake_ksoftirqd(void)
 429{
 430	return true;
 431}
 432
 433static inline void invoke_softirq(void)
 434{
 435	if (ksoftirqd_running(local_softirq_pending()))
 436		return;
 437
 438	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 439#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 440		/*
 441		 * We can safely execute softirq on the current stack if
 442		 * it is the irq stack, because it should be near empty
 443		 * at this stage.
 444		 */
 445		__do_softirq();
 446#else
 447		/*
 448		 * Otherwise, irq_exit() is called on the task stack that can
 449		 * be potentially deep already. So call softirq in its own stack
 450		 * to prevent any overrun.
 451		 */
 452		do_softirq_own_stack();
 453#endif
 454	} else {
 455		wakeup_softirqd();
 456	}
 457}
 458
 459asmlinkage __visible void do_softirq(void)
 460{
 461	__u32 pending;
 462	unsigned long flags;
 463
 464	if (in_interrupt())
 465		return;
 466
 467	local_irq_save(flags);
 468
 469	pending = local_softirq_pending();
 470
 471	if (pending && !ksoftirqd_running(pending))
 472		do_softirq_own_stack();
 473
 474	local_irq_restore(flags);
 475}
 476
 477#endif /* !CONFIG_PREEMPT_RT */
 478
 479/*
 480 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 481 * but break the loop if need_resched() is set or after 2 ms.
 482 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 483 * certain cases, such as stop_machine(), jiffies may cease to
 484 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 485 * well to make sure we eventually return from this method.
 486 *
 487 * These limits have been established via experimentation.
 488 * The two things to balance are latency and fairness -
 489 * we want to handle softirqs as soon as possible, but they
 490 * should not be able to lock up the box.
 491 */
 492#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
 493#define MAX_SOFTIRQ_RESTART 10
 494
 495#ifdef CONFIG_TRACE_IRQFLAGS
 496/*
 497 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 498 * to keep the lockdep irq context tracking as tight as possible in order to
 499 * not mis-qualify lock contexts and miss possible deadlocks.
 500 */
 501
 502static inline bool lockdep_softirq_start(void)
 503{
 504	bool in_hardirq = false;
 505
 506	if (lockdep_hardirq_context()) {
 507		in_hardirq = true;
 508		lockdep_hardirq_exit();
 509	}
 510
 511	lockdep_softirq_enter();
 512
 513	return in_hardirq;
 514}
 515
 516static inline void lockdep_softirq_end(bool in_hardirq)
 517{
 518	lockdep_softirq_exit();
 519
 520	if (in_hardirq)
 521		lockdep_hardirq_enter();
 522}
 523#else
 524static inline bool lockdep_softirq_start(void) { return false; }
 525static inline void lockdep_softirq_end(bool in_hardirq) { }
 526#endif
 527
 528asmlinkage __visible void __softirq_entry __do_softirq(void)
 529{
 530	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 531	unsigned long old_flags = current->flags;
 532	int max_restart = MAX_SOFTIRQ_RESTART;
 533	struct softirq_action *h;
 534	bool in_hardirq;
 535	__u32 pending;
 536	int softirq_bit;
 537
 538	/*
 539	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
 540	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
 541	 * again if the socket is related to swapping.
 542	 */
 543	current->flags &= ~PF_MEMALLOC;
 544
 545	pending = local_softirq_pending();
 
 546
 547	softirq_handle_begin();
 548	in_hardirq = lockdep_softirq_start();
 549	account_softirq_enter(current);
 550
 551restart:
 552	/* Reset the pending bitmask before enabling irqs */
 553	set_softirq_pending(0);
 554
 555	local_irq_enable();
 556
 557	h = softirq_vec;
 558
 559	while ((softirq_bit = ffs(pending))) {
 560		unsigned int vec_nr;
 561		int prev_count;
 562
 563		h += softirq_bit - 1;
 564
 565		vec_nr = h - softirq_vec;
 566		prev_count = preempt_count();
 567
 568		kstat_incr_softirqs_this_cpu(vec_nr);
 569
 570		trace_softirq_entry(vec_nr);
 571		h->action(h);
 572		trace_softirq_exit(vec_nr);
 573		if (unlikely(prev_count != preempt_count())) {
 574			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
 575			       vec_nr, softirq_to_name[vec_nr], h->action,
 576			       prev_count, preempt_count());
 577			preempt_count_set(prev_count);
 578		}
 579		h++;
 580		pending >>= softirq_bit;
 581	}
 582
 583	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
 584	    __this_cpu_read(ksoftirqd) == current)
 585		rcu_softirq_qs();
 586
 587	local_irq_disable();
 588
 589	pending = local_softirq_pending();
 590	if (pending) {
 591		if (time_before(jiffies, end) && !need_resched() &&
 592		    --max_restart)
 593			goto restart;
 594
 595		wakeup_softirqd();
 596	}
 597
 598	account_softirq_exit(current);
 599	lockdep_softirq_end(in_hardirq);
 600	softirq_handle_end();
 601	current_restore_flags(old_flags, PF_MEMALLOC);
 602}
 603
 604/**
 605 * irq_enter_rcu - Enter an interrupt context with RCU watching
 606 */
 607void irq_enter_rcu(void)
 608{
 609	__irq_enter_raw();
 610
 611	if (tick_nohz_full_cpu(smp_processor_id()) ||
 612	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
 613		tick_irq_enter();
 614
 615	account_hardirq_enter(current);
 616}
 617
 618/**
 619 * irq_enter - Enter an interrupt context including RCU update
 620 */
 621void irq_enter(void)
 622{
 623	ct_irq_enter();
 624	irq_enter_rcu();
 625}
 626
 627static inline void tick_irq_exit(void)
 628{
 629#ifdef CONFIG_NO_HZ_COMMON
 630	int cpu = smp_processor_id();
 631
 632	/* Make sure that timer wheel updates are propagated */
 633	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
 634		if (!in_hardirq())
 635			tick_nohz_irq_exit();
 636	}
 637#endif
 638}
 639
 640static inline void __irq_exit_rcu(void)
 641{
 642#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
 643	local_irq_disable();
 644#else
 645	lockdep_assert_irqs_disabled();
 646#endif
 647	account_hardirq_exit(current);
 
 648	preempt_count_sub(HARDIRQ_OFFSET);
 649	if (!in_interrupt() && local_softirq_pending())
 650		invoke_softirq();
 651
 652	tick_irq_exit();
 653}
 654
 655/**
 656 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 657 *
 658 * Also processes softirqs if needed and possible.
 659 */
 660void irq_exit_rcu(void)
 661{
 662	__irq_exit_rcu();
 663	 /* must be last! */
 664	lockdep_hardirq_exit();
 665}
 666
 667/**
 668 * irq_exit - Exit an interrupt context, update RCU and lockdep
 669 *
 670 * Also processes softirqs if needed and possible.
 671 */
 672void irq_exit(void)
 673{
 674	__irq_exit_rcu();
 675	ct_irq_exit();
 676	 /* must be last! */
 677	lockdep_hardirq_exit();
 678}
 679
 680/*
 681 * This function must run with irqs disabled!
 682 */
 683inline void raise_softirq_irqoff(unsigned int nr)
 684{
 685	__raise_softirq_irqoff(nr);
 686
 687	/*
 688	 * If we're in an interrupt or softirq, we're done
 689	 * (this also catches softirq-disabled code). We will
 690	 * actually run the softirq once we return from
 691	 * the irq or softirq.
 692	 *
 693	 * Otherwise we wake up ksoftirqd to make sure we
 694	 * schedule the softirq soon.
 695	 */
 696	if (!in_interrupt() && should_wake_ksoftirqd())
 697		wakeup_softirqd();
 698}
 699
 700void raise_softirq(unsigned int nr)
 701{
 702	unsigned long flags;
 703
 704	local_irq_save(flags);
 705	raise_softirq_irqoff(nr);
 706	local_irq_restore(flags);
 707}
 708
 709void __raise_softirq_irqoff(unsigned int nr)
 710{
 711	lockdep_assert_irqs_disabled();
 712	trace_softirq_raise(nr);
 713	or_softirq_pending(1UL << nr);
 714}
 715
 716void open_softirq(int nr, void (*action)(struct softirq_action *))
 717{
 718	softirq_vec[nr].action = action;
 719}
 720
 721/*
 722 * Tasklets
 723 */
 724struct tasklet_head {
 725	struct tasklet_struct *head;
 726	struct tasklet_struct **tail;
 727};
 728
 729static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 730static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 731
 732static void __tasklet_schedule_common(struct tasklet_struct *t,
 733				      struct tasklet_head __percpu *headp,
 734				      unsigned int softirq_nr)
 735{
 736	struct tasklet_head *head;
 737	unsigned long flags;
 738
 739	local_irq_save(flags);
 740	head = this_cpu_ptr(headp);
 741	t->next = NULL;
 742	*head->tail = t;
 743	head->tail = &(t->next);
 744	raise_softirq_irqoff(softirq_nr);
 745	local_irq_restore(flags);
 746}
 747
 748void __tasklet_schedule(struct tasklet_struct *t)
 749{
 750	__tasklet_schedule_common(t, &tasklet_vec,
 751				  TASKLET_SOFTIRQ);
 752}
 753EXPORT_SYMBOL(__tasklet_schedule);
 754
 755void __tasklet_hi_schedule(struct tasklet_struct *t)
 756{
 757	__tasklet_schedule_common(t, &tasklet_hi_vec,
 758				  HI_SOFTIRQ);
 759}
 760EXPORT_SYMBOL(__tasklet_hi_schedule);
 761
 762static bool tasklet_clear_sched(struct tasklet_struct *t)
 763{
 764	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
 765		wake_up_var(&t->state);
 766		return true;
 767	}
 768
 769	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
 770		  t->use_callback ? "callback" : "func",
 771		  t->use_callback ? (void *)t->callback : (void *)t->func);
 772
 773	return false;
 774}
 775
 776static void tasklet_action_common(struct softirq_action *a,
 777				  struct tasklet_head *tl_head,
 778				  unsigned int softirq_nr)
 779{
 780	struct tasklet_struct *list;
 781
 782	local_irq_disable();
 783	list = tl_head->head;
 784	tl_head->head = NULL;
 785	tl_head->tail = &tl_head->head;
 786	local_irq_enable();
 787
 788	while (list) {
 789		struct tasklet_struct *t = list;
 790
 791		list = list->next;
 792
 793		if (tasklet_trylock(t)) {
 794			if (!atomic_read(&t->count)) {
 795				if (tasklet_clear_sched(t)) {
 796					if (t->use_callback)
 797						t->callback(t);
 798					else
 799						t->func(t->data);
 800				}
 801				tasklet_unlock(t);
 802				continue;
 803			}
 804			tasklet_unlock(t);
 805		}
 806
 807		local_irq_disable();
 808		t->next = NULL;
 809		*tl_head->tail = t;
 810		tl_head->tail = &t->next;
 811		__raise_softirq_irqoff(softirq_nr);
 812		local_irq_enable();
 813	}
 814}
 815
 816static __latent_entropy void tasklet_action(struct softirq_action *a)
 817{
 818	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
 819}
 820
 821static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 822{
 823	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
 824}
 825
 826void tasklet_setup(struct tasklet_struct *t,
 827		   void (*callback)(struct tasklet_struct *))
 828{
 829	t->next = NULL;
 830	t->state = 0;
 831	atomic_set(&t->count, 0);
 832	t->callback = callback;
 833	t->use_callback = true;
 834	t->data = 0;
 835}
 836EXPORT_SYMBOL(tasklet_setup);
 837
 838void tasklet_init(struct tasklet_struct *t,
 839		  void (*func)(unsigned long), unsigned long data)
 840{
 841	t->next = NULL;
 842	t->state = 0;
 843	atomic_set(&t->count, 0);
 844	t->func = func;
 845	t->use_callback = false;
 846	t->data = data;
 847}
 848EXPORT_SYMBOL(tasklet_init);
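/*
 * Illustrative sketch, not part of softirq.c: the callback-style tasklet
 * API that replaces the func/data variant. from_tasklet() is the
 * container_of() helper from <linux/interrupt.h>; the example_* names are
 * hypothetical.
 */
struct example_dev {
	struct tasklet_struct	rx_tasklet;
	/* ... device state ... */
};

static void example_rx_work(struct tasklet_struct *t)
{
	struct example_dev *dev = from_tasklet(dev, t, rx_tasklet);

	/* process completions for @dev in softirq context */
}

static void example_setup(struct example_dev *dev)
{
	tasklet_setup(&dev->rx_tasklet, example_rx_work);
}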
 849
 850#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 851/*
 852 * Do not use in new code. Waiting for tasklets from atomic contexts is
 853 * error prone and should be avoided.
 854 */
 855void tasklet_unlock_spin_wait(struct tasklet_struct *t)
 856{
 857	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
 858		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
 859			/*
 860			 * Prevent a live lock when current preempted soft
 861			 * interrupt processing or prevents ksoftirqd from
 862			 * running. If the tasklet runs on a different CPU
 863			 * then this has no effect other than doing the BH
 864			 * disable/enable dance for nothing.
 865			 */
 866			local_bh_disable();
 867			local_bh_enable();
 868		} else {
 869			cpu_relax();
 870		}
 871	}
 872}
 873EXPORT_SYMBOL(tasklet_unlock_spin_wait);
 874#endif
 875
 876void tasklet_kill(struct tasklet_struct *t)
 877{
 878	if (in_interrupt())
 879		pr_notice("Attempt to kill tasklet from interrupt\n");
 880
 881	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 882		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
 883
 884	tasklet_unlock_wait(t);
 885	tasklet_clear_sched(t);
 886}
 887EXPORT_SYMBOL(tasklet_kill);
 888
 889#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 890void tasklet_unlock(struct tasklet_struct *t)
 891{
 892	smp_mb__before_atomic();
 893	clear_bit(TASKLET_STATE_RUN, &t->state);
 894	smp_mb__after_atomic();
 895	wake_up_var(&t->state);
 
 896}
 897EXPORT_SYMBOL_GPL(tasklet_unlock);
 898
 899void tasklet_unlock_wait(struct tasklet_struct *t)
 900{
 901	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
 902}
 903EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
 904#endif
 905
 906void __init softirq_init(void)
 907{
 908	int cpu;
 909
 910	for_each_possible_cpu(cpu) {
 911		per_cpu(tasklet_vec, cpu).tail =
 912			&per_cpu(tasklet_vec, cpu).head;
 913		per_cpu(tasklet_hi_vec, cpu).tail =
 914			&per_cpu(tasklet_hi_vec, cpu).head;
 915	}
 916
 917	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 918	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 919}
 920
 921static int ksoftirqd_should_run(unsigned int cpu)
 922{
 923	return local_softirq_pending();
 924}
 925
 926static void run_ksoftirqd(unsigned int cpu)
 927{
 928	ksoftirqd_run_begin();
 929	if (local_softirq_pending()) {
 930		/*
 931		 * We can safely run softirq on inline stack, as we are not deep
 932		 * in the task stack here.
 933		 */
 934		__do_softirq();
 935		ksoftirqd_run_end();
 936		cond_resched();
 937		return;
 938	}
 939	ksoftirqd_run_end();
 940}
 941
 942#ifdef CONFIG_HOTPLUG_CPU
 943static int takeover_tasklets(unsigned int cpu)
 944{
 945	/* CPU is dead, so no lock needed. */
 946	local_irq_disable();
 947
 948	/* Find end, append list for that CPU. */
 949	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
 950		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
 951		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 952		per_cpu(tasklet_vec, cpu).head = NULL;
 953		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 954	}
 955	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 956
 957	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
 958		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
 959		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 960		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 961		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 962	}
 963	raise_softirq_irqoff(HI_SOFTIRQ);
 964
 965	local_irq_enable();
 966	return 0;
 967}
 968#else
 969#define takeover_tasklets	NULL
 970#endif /* CONFIG_HOTPLUG_CPU */
 971
 972static struct smp_hotplug_thread softirq_threads = {
 973	.store			= &ksoftirqd,
 974	.thread_should_run	= ksoftirqd_should_run,
 975	.thread_fn		= run_ksoftirqd,
 976	.thread_comm		= "ksoftirqd/%u",
 977};
 978
 979static __init int spawn_ksoftirqd(void)
 980{
 981	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
 982				  takeover_tasklets);
 983	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 984
 985	return 0;
 986}
 987early_initcall(spawn_ksoftirqd);
 988
 989/*
 990 * [ These __weak aliases are kept in a separate compilation unit, so that
 991 *   GCC does not inline them incorrectly. ]
 992 */
 993
 994int __init __weak early_irq_init(void)
 995{
 996	return 0;
 997}
 998
 999int __init __weak arch_probe_nr_irqs(void)
1000{
1001	return NR_IRQS_LEGACY;
1002}
1003
1004int __init __weak arch_early_irq_init(void)
1005{
1006	return 0;
1007}
1008
1009unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1010{
1011	return from;
1012}