v4.10.11
 
  1/*
  2 *	linux/kernel/softirq.c
  3 *
  4 *	Copyright (C) 1992 Linus Torvalds
  5 *
  6 *	Distribute under GPLv2.
  7 *
  8 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/export.h>
 14#include <linux/kernel_stat.h>
 15#include <linux/interrupt.h>
 16#include <linux/init.h>
 17#include <linux/mm.h>
 18#include <linux/notifier.h>
 19#include <linux/percpu.h>
 20#include <linux/cpu.h>
 21#include <linux/freezer.h>
 22#include <linux/kthread.h>
 23#include <linux/rcupdate.h>
 24#include <linux/ftrace.h>
 25#include <linux/smp.h>
 26#include <linux/smpboot.h>
 27#include <linux/tick.h>
 28#include <linux/irq.h>
 29
 30#define CREATE_TRACE_POINTS
 31#include <trace/events/irq.h>
 32
 33/*
 34   - No shared variables, all the data are CPU local.
 35   - If a softirq needs serialization, let it serialize itself
 36     by its own spinlocks.
 37   - Even if softirq is serialized, only local cpu is marked for
 38     execution. Hence, we get something sort of weak cpu binding.
 39     Though it is still not clear whether it will result in better
 40     locality or not.
 41
 42   Examples:
 43   - NET RX softirq. It is multithreaded and does not require
 44     any global serialization.
 45   - NET TX softirq. It kicks software netdevice queues, hence
 46     it is logically serialized per device, but this serialization
 47     is invisible to common code.
 48   - Tasklets: serialized wrt itself.
 49 */
 50
 51#ifndef __ARCH_IRQ_STAT
 52irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 53EXPORT_SYMBOL(irq_stat);
 54#endif
 55
 56static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 57
 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 59
 60const char * const softirq_to_name[NR_SOFTIRQS] = {
 61	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 62	"TASKLET", "SCHED", "HRTIMER", "RCU"
 63};
 64
 65/*
 66 * we cannot loop indefinitely here to avoid userspace starvation,
 67 * but we also don't want to introduce a worst case 1/HZ latency
 68 * to the pending events, so let the scheduler balance
 69 * the softirq load for us.
 70 */
 71static void wakeup_softirqd(void)
 72{
 73	/* Interrupts are disabled: no need to stop preemption */
 74	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 75
 76	if (tsk && tsk->state != TASK_RUNNING)
 77		wake_up_process(tsk);
 78}
 79
 80/*
 81 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 82 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 83 */
 84static bool ksoftirqd_running(void)
 85{
 86	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 87
 88	return tsk && (tsk->state == TASK_RUNNING);
 89}
 90
 91/*
 92 * preempt_count and SOFTIRQ_OFFSET usage:
 93 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 94 *   softirq processing.
 95 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 96 *   on local_bh_disable or local_bh_enable.
 97 * This lets us distinguish between whether we are currently processing
 98 * softirq and whether we just have bh disabled.
 99 */
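/*
 * Rough decoding of the scheme above (illustrative sketch, assuming the
 * standard helpers from <linux/preempt.h>):
 *
 *	softirq_count()      = preempt_count() & SOFTIRQ_MASK
 *	in_softirq()         - nonzero while serving softirqs *or* while
 *	                       bottom halves are merely disabled
 *	in_serving_softirq() - nonzero only while softirq handlers are
 *	                       actually executing (SOFTIRQ_OFFSET bit set)
 */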
100
101/*
102 * This one is for softirq.c-internal use,
103 * where hardirqs are disabled legitimately:
104 */
105#ifdef CONFIG_TRACE_IRQFLAGS
106void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
107{
108	unsigned long flags;
109
110	WARN_ON_ONCE(in_irq());
111
112	raw_local_irq_save(flags);
113	/*
114	 * The preempt tracer hooks into preempt_count_add and will break
115	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
116	 * is set and before current->softirq_enabled is cleared.
117	 * We must manually increment preempt_count here and manually
118	 * call the trace_preempt_off later.
119	 */
120	__preempt_count_add(cnt);
121	/*
122	 * Were softirqs turned off above:
123	 */
124	if (softirq_count() == (cnt & SOFTIRQ_MASK))
125		trace_softirqs_off(ip);
126	raw_local_irq_restore(flags);
127
128	if (preempt_count() == cnt) {
129#ifdef CONFIG_DEBUG_PREEMPT
130		current->preempt_disable_ip = get_lock_parent_ip();
131#endif
132		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
133	}
134}
135EXPORT_SYMBOL(__local_bh_disable_ip);
136#endif /* CONFIG_TRACE_IRQFLAGS */
137
138static void __local_bh_enable(unsigned int cnt)
139{
140	WARN_ON_ONCE(!irqs_disabled());
141
142	if (softirq_count() == (cnt & SOFTIRQ_MASK))
143		trace_softirqs_on(_RET_IP_);
144	preempt_count_sub(cnt);
145}
146
147/*
148 * Special-case - softirqs can safely be enabled in
149 * cond_resched_softirq(), or by __do_softirq(),
150 * without processing still-pending softirqs:
151 */
152void _local_bh_enable(void)
153{
154	WARN_ON_ONCE(in_irq());
155	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
156}
157EXPORT_SYMBOL(_local_bh_enable);
158
159void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
160{
161	WARN_ON_ONCE(in_irq() || irqs_disabled());
162#ifdef CONFIG_TRACE_IRQFLAGS
163	local_irq_disable();
164#endif
165	/*
166	 * Are softirqs going to be turned on now:
167	 */
168	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
169		trace_softirqs_on(ip);
170	/*
171	 * Keep preemption disabled until we are done with
172	 * softirq processing:
173	 */
174	preempt_count_sub(cnt - 1);
175
176	if (unlikely(!in_interrupt() && local_softirq_pending())) {
177		/*
178		 * Run softirq if any pending. And do it in its own stack
179		 * as we may be calling this deep in a task call stack already.
180		 */
181		do_softirq();
182	}
183
184	preempt_count_dec();
185#ifdef CONFIG_TRACE_IRQFLAGS
186	local_irq_enable();
187#endif
188	preempt_check_resched();
189}
190EXPORT_SYMBOL(__local_bh_enable_ip);
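/*
 * Sketch of the usual pattern the helpers above enable: a hypothetical
 * driver protecting per-CPU data that is also touched from its softirq
 * handler. foo_stats and foo_update() are illustrative names, not part
 * of this file.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(unsigned long, foo_stats);

static void foo_update(unsigned long delta)
{
	local_bh_disable();		/* keep softirq handlers off this CPU */
	__this_cpu_add(foo_stats, delta);
	local_bh_enable();		/* may run pending softirqs right here */
}
#endif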
191
192/*
193 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
194 * but break the loop if need_resched() is set or after 2 ms.
195 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
196 * certain cases, such as stop_machine(), jiffies may cease to
197 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
198 * well to make sure we eventually return from this method.
199 *
200 * These limits have been established via experimentation.
201 * The two things to balance are latency against fairness -
202 * we want to handle softirqs as soon as possible, but they
203 * should not be able to lock up the box.
204 */
205#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
206#define MAX_SOFTIRQ_RESTART 10
207
208#ifdef CONFIG_TRACE_IRQFLAGS
209/*
210 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
211 * to keep the lockdep irq context tracking as tight as possible in order to
212 * not mis-qualify lock contexts and miss possible deadlocks.
213 */
214
215static inline bool lockdep_softirq_start(void)
216{
217	bool in_hardirq = false;
218
219	if (trace_hardirq_context(current)) {
220		in_hardirq = true;
221		trace_hardirq_exit();
222	}
223
224	lockdep_softirq_enter();
225
226	return in_hardirq;
227}
228
229static inline void lockdep_softirq_end(bool in_hardirq)
230{
231	lockdep_softirq_exit();
232
233	if (in_hardirq)
234		trace_hardirq_enter();
235}
236#else
237static inline bool lockdep_softirq_start(void) { return false; }
238static inline void lockdep_softirq_end(bool in_hardirq) { }
239#endif
240
241asmlinkage __visible void __softirq_entry __do_softirq(void)
242{
243	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
244	unsigned long old_flags = current->flags;
245	int max_restart = MAX_SOFTIRQ_RESTART;
246	struct softirq_action *h;
247	bool in_hardirq;
248	__u32 pending;
249	int softirq_bit;
250
251	/*
252	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
253	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
254	 * again if the socket is related to swapping.
255	 */
256	current->flags &= ~PF_MEMALLOC;
257
258	pending = local_softirq_pending();
259	account_irq_enter_time(current);
260
261	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
262	in_hardirq = lockdep_softirq_start();
263
264restart:
265	/* Reset the pending bitmask before enabling irqs */
266	set_softirq_pending(0);
267
268	local_irq_enable();
269
270	h = softirq_vec;
271
272	while ((softirq_bit = ffs(pending))) {
273		unsigned int vec_nr;
274		int prev_count;
275
276		h += softirq_bit - 1;
277
278		vec_nr = h - softirq_vec;
279		prev_count = preempt_count();
280
281		kstat_incr_softirqs_this_cpu(vec_nr);
282
283		trace_softirq_entry(vec_nr);
284		h->action(h);
285		trace_softirq_exit(vec_nr);
286		if (unlikely(prev_count != preempt_count())) {
287			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
288			       vec_nr, softirq_to_name[vec_nr], h->action,
289			       prev_count, preempt_count());
290			preempt_count_set(prev_count);
291		}
292		h++;
293		pending >>= softirq_bit;
294	}
295
296	rcu_bh_qs();
297	local_irq_disable();
298
299	pending = local_softirq_pending();
300	if (pending) {
301		if (time_before(jiffies, end) && !need_resched() &&
302		    --max_restart)
303			goto restart;
304
305		wakeup_softirqd();
306	}
307
308	lockdep_softirq_end(in_hardirq);
309	account_irq_exit_time(current);
310	__local_bh_enable(SOFTIRQ_OFFSET);
311	WARN_ON_ONCE(in_interrupt());
312	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
313}
314
315asmlinkage __visible void do_softirq(void)
316{
317	__u32 pending;
318	unsigned long flags;
319
320	if (in_interrupt())
321		return;
322
323	local_irq_save(flags);
324
325	pending = local_softirq_pending();
326
327	if (pending && !ksoftirqd_running())
328		do_softirq_own_stack();
329
330	local_irq_restore(flags);
331}
332
333/*
334 * Enter an interrupt context.
335 */
336void irq_enter(void)
337{
338	rcu_irq_enter();
339	if (is_idle_task(current) && !in_interrupt()) {
340		/*
341		 * Prevent raise_softirq from needlessly waking up ksoftirqd
342		 * here, as softirq will be serviced on return from interrupt.
343		 */
344		local_bh_disable();
345		tick_irq_enter();
346		_local_bh_enable();
347	}
348
349	__irq_enter();
350}
351
352static inline void invoke_softirq(void)
353{
354	if (ksoftirqd_running())
355		return;
356
357	if (!force_irqthreads) {
358#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
359		/*
360		 * We can safely execute softirq on the current stack if
361		 * it is the irq stack, because it should be near empty
362		 * at this stage.
363		 */
364		__do_softirq();
365#else
366		/*
367		 * Otherwise, irq_exit() is called on the task stack that can
368		 * be potentially deep already. So call softirq in its own stack
369		 * to prevent from any overrun.
370		 */
371		do_softirq_own_stack();
372#endif
373	} else {
374		wakeup_softirqd();
375	}
376}
377
378static inline void tick_irq_exit(void)
379{
380#ifdef CONFIG_NO_HZ_COMMON
381	int cpu = smp_processor_id();
382
383	/* Make sure that timer wheel updates are propagated */
384	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
385		if (!in_interrupt())
386			tick_nohz_irq_exit();
387	}
388#endif
389}
390
391/*
392 * Exit an interrupt context. Process softirqs if needed and possible:
393 */
394void irq_exit(void)
395{
396#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
397	local_irq_disable();
398#else
399	WARN_ON_ONCE(!irqs_disabled());
400#endif
401
402	account_irq_exit_time(current);
403	preempt_count_sub(HARDIRQ_OFFSET);
404	if (!in_interrupt() && local_softirq_pending())
405		invoke_softirq();
406
407	tick_irq_exit();
408	rcu_irq_exit();
409	trace_hardirq_exit(); /* must be last! */
410}
411
412/*
413 * This function must run with irqs disabled!
414 */
415inline void raise_softirq_irqoff(unsigned int nr)
416{
417	__raise_softirq_irqoff(nr);
418
419	/*
420	 * If we're in an interrupt or softirq, we're done
421	 * (this also catches softirq-disabled code). We will
422	 * actually run the softirq once we return from
423	 * the irq or softirq.
424	 *
425	 * Otherwise we wake up ksoftirqd to make sure we
426	 * schedule the softirq soon.
427	 */
428	if (!in_interrupt())
429		wakeup_softirqd();
430}
431
432void raise_softirq(unsigned int nr)
433{
434	unsigned long flags;
435
436	local_irq_save(flags);
437	raise_softirq_irqoff(nr);
438	local_irq_restore(flags);
439}
440
441void __raise_softirq_irqoff(unsigned int nr)
442{
443	trace_softirq_raise(nr);
444	or_softirq_pending(1UL << nr);
445}
446
447void open_softirq(int nr, void (*action)(struct softirq_action *))
448{
449	softirq_vec[nr].action = action;
450}
451
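/*
 * Sketch of how a softirq vector is wired up and raised. The vectors are a
 * fixed, compile-time set (the enum in <linux/interrupt.h>), so real users
 * register a handler for one of the existing entries at init time;
 * FOO_SOFTIRQ and the handler below are illustrative assumptions.
 */
#if 0	/* example only */
static void foo_softirq_action(struct softirq_action *h)
{
	/* runs with interrupts enabled, on the CPU that raised the softirq */
}

static int __init foo_init(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
	return 0;
}

/* typically called from a hardirq handler: */
static void foo_kick(void)
{
	raise_softirq(FOO_SOFTIRQ);	/* or raise_softirq_irqoff() with irqs off */
}
#endif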
452/*
453 * Tasklets
454 */
455struct tasklet_head {
456	struct tasklet_struct *head;
457	struct tasklet_struct **tail;
458};
459
460static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
461static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
462
463void __tasklet_schedule(struct tasklet_struct *t)
464{
465	unsigned long flags;
466
467	local_irq_save(flags);
468	t->next = NULL;
469	*__this_cpu_read(tasklet_vec.tail) = t;
470	__this_cpu_write(tasklet_vec.tail, &(t->next));
471	raise_softirq_irqoff(TASKLET_SOFTIRQ);
472	local_irq_restore(flags);
473}
474EXPORT_SYMBOL(__tasklet_schedule);
475
476void __tasklet_hi_schedule(struct tasklet_struct *t)
477{
478	unsigned long flags;
479
480	local_irq_save(flags);
481	t->next = NULL;
482	*__this_cpu_read(tasklet_hi_vec.tail) = t;
483	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
484	raise_softirq_irqoff(HI_SOFTIRQ);
485	local_irq_restore(flags);
486}
487EXPORT_SYMBOL(__tasklet_hi_schedule);
488
489void __tasklet_hi_schedule_first(struct tasklet_struct *t)
490{
491	BUG_ON(!irqs_disabled());
492
493	t->next = __this_cpu_read(tasklet_hi_vec.head);
494	__this_cpu_write(tasklet_hi_vec.head, t);
495	__raise_softirq_irqoff(HI_SOFTIRQ);
496}
497EXPORT_SYMBOL(__tasklet_hi_schedule_first);
498
499static __latent_entropy void tasklet_action(struct softirq_action *a)
500{
501	struct tasklet_struct *list;
502
503	local_irq_disable();
504	list = __this_cpu_read(tasklet_vec.head);
505	__this_cpu_write(tasklet_vec.head, NULL);
506	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
507	local_irq_enable();
508
509	while (list) {
510		struct tasklet_struct *t = list;
511
512		list = list->next;
513
514		if (tasklet_trylock(t)) {
515			if (!atomic_read(&t->count)) {
516				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
517							&t->state))
518					BUG();
519				t->func(t->data);
520				tasklet_unlock(t);
521				continue;
522			}
523			tasklet_unlock(t);
524		}
525
526		local_irq_disable();
527		t->next = NULL;
528		*__this_cpu_read(tasklet_vec.tail) = t;
529		__this_cpu_write(tasklet_vec.tail, &(t->next));
530		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
531		local_irq_enable();
532	}
533}
534
535static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
536{
537	struct tasklet_struct *list;
538
539	local_irq_disable();
540	list = __this_cpu_read(tasklet_hi_vec.head);
541	__this_cpu_write(tasklet_hi_vec.head, NULL);
542	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
543	local_irq_enable();
544
545	while (list) {
546		struct tasklet_struct *t = list;
547
548		list = list->next;
549
550		if (tasklet_trylock(t)) {
551			if (!atomic_read(&t->count)) {
552				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
553							&t->state))
554					BUG();
555				t->func(t->data);
556				tasklet_unlock(t);
557				continue;
558			}
559			tasklet_unlock(t);
560		}
561
562		local_irq_disable();
563		t->next = NULL;
564		*__this_cpu_read(tasklet_hi_vec.tail) = t;
565		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
566		__raise_softirq_irqoff(HI_SOFTIRQ);
567		local_irq_enable();
568	}
569}
570
571void tasklet_init(struct tasklet_struct *t,
572		  void (*func)(unsigned long), unsigned long data)
573{
574	t->next = NULL;
575	t->state = 0;
576	atomic_set(&t->count, 0);
577	t->func = func;
578	t->data = data;
579}
580EXPORT_SYMBOL(tasklet_init);
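/*
 * Sketch of typical tasklet usage with the interface above: a hypothetical
 * driver defers work from its hardirq handler into softirq context.
 * foo_dev, foo_tasklet_fn() and foo_irq() are illustrative names.
 */
#if 0	/* example only */
struct foo_dev {
	struct tasklet_struct tasklet;
	/* ... device state ... */
};

static void foo_tasklet_fn(unsigned long data)
{
	struct foo_dev *dev = (struct foo_dev *)data;

	/* bottom-half work for dev, runs in softirq context */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *dev = dev_id;

	tasklet_schedule(&dev->tasklet);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}

static void foo_setup(struct foo_dev *dev)
{
	tasklet_init(&dev->tasklet, foo_tasklet_fn, (unsigned long)dev);
}

static void foo_teardown(struct foo_dev *dev)
{
	tasklet_kill(&dev->tasklet);	/* wait for a scheduled run to finish */
}
#endif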
581
582void tasklet_kill(struct tasklet_struct *t)
583{
584	if (in_interrupt())
585		pr_notice("Attempt to kill tasklet from interrupt\n");
586
587	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
588		do {
589			yield();
590		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
591	}
592	tasklet_unlock_wait(t);
593	clear_bit(TASKLET_STATE_SCHED, &t->state);
594}
595EXPORT_SYMBOL(tasklet_kill);
596
597/*
598 * tasklet_hrtimer
599 */
600
601/*
602 * The trampoline is called when the hrtimer expires. It schedules a tasklet
603 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
604 * hrtimer callback, but from softirq context.
605 */
606static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
607{
608	struct tasklet_hrtimer *ttimer =
609		container_of(timer, struct tasklet_hrtimer, timer);
610
611	tasklet_hi_schedule(&ttimer->tasklet);
612	return HRTIMER_NORESTART;
613}
614
615/*
616 * Helper function which calls the hrtimer callback from
617 * tasklet/softirq context
618 */
619static void __tasklet_hrtimer_trampoline(unsigned long data)
620{
621	struct tasklet_hrtimer *ttimer = (void *)data;
622	enum hrtimer_restart restart;
623
624	restart = ttimer->function(&ttimer->timer);
625	if (restart != HRTIMER_NORESTART)
626		hrtimer_restart(&ttimer->timer);
627}
628
629/**
630 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
631 * @ttimer:	 tasklet_hrtimer which is initialized
632 * @function:	 hrtimer callback function which gets called from softirq context
633 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
634 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
635 */
636void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
637			  enum hrtimer_restart (*function)(struct hrtimer *),
638			  clockid_t which_clock, enum hrtimer_mode mode)
639{
640	hrtimer_init(&ttimer->timer, which_clock, mode);
641	ttimer->timer.function = __hrtimer_tasklet_trampoline;
642	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
643		     (unsigned long)ttimer);
644	ttimer->function = function;
645}
646EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
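/*
 * Sketch of tasklet_hrtimer usage: the hrtimer itself fires in hardirq
 * context, and the combo then invokes the supplied callback from softirq
 * (tasklet) context. foo_timer_fn() and the 10 ms delay are illustrative;
 * tasklet_hrtimer_start()/_cancel() are assumed to be the companion
 * helpers declared in <linux/interrupt.h>.
 */
#if 0	/* example only */
static struct tasklet_hrtimer foo_timer;

static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
{
	/* softirq context: moderate work, BH locks etc. are fine here */
	return HRTIMER_NORESTART;
}

static void foo_start(void)
{
	tasklet_hrtimer_init(&foo_timer, foo_timer_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&foo_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}

static void foo_stop(void)
{
	tasklet_hrtimer_cancel(&foo_timer);
}
#endif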
647
648void __init softirq_init(void)
649{
650	int cpu;
651
652	for_each_possible_cpu(cpu) {
653		per_cpu(tasklet_vec, cpu).tail =
654			&per_cpu(tasklet_vec, cpu).head;
655		per_cpu(tasklet_hi_vec, cpu).tail =
656			&per_cpu(tasklet_hi_vec, cpu).head;
657	}
658
659	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
660	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
661}
662
663static int ksoftirqd_should_run(unsigned int cpu)
664{
665	return local_softirq_pending();
666}
667
668static void run_ksoftirqd(unsigned int cpu)
669{
670	local_irq_disable();
671	if (local_softirq_pending()) {
672		/*
673		 * We can safely run softirq on inline stack, as we are not deep
674		 * in the task stack here.
675		 */
676		__do_softirq();
677		local_irq_enable();
678		cond_resched_rcu_qs();
679		return;
680	}
681	local_irq_enable();
682}
683
684#ifdef CONFIG_HOTPLUG_CPU
685/*
686 * tasklet_kill_immediate is called to remove a tasklet which can already be
687 * scheduled for execution on @cpu.
688 *
689 * Unlike tasklet_kill, this function removes the tasklet
690 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
691 *
692 * When this function is called, @cpu must be in the CPU_DEAD state.
693 */
694void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
695{
696	struct tasklet_struct **i;
697
698	BUG_ON(cpu_online(cpu));
699	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
700
701	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
702		return;
703
704	/* CPU is dead, so no lock needed. */
705	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
706		if (*i == t) {
707			*i = t->next;
708			/* If this was the tail element, move the tail ptr */
709			if (*i == NULL)
710				per_cpu(tasklet_vec, cpu).tail = i;
711			return;
712		}
713	}
714	BUG();
715}
716
717static int takeover_tasklets(unsigned int cpu)
718{
719	/* CPU is dead, so no lock needed. */
720	local_irq_disable();
721
722	/* Find end, append list for that CPU. */
723	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
724		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
725		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
726		per_cpu(tasklet_vec, cpu).head = NULL;
727		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
728	}
729	raise_softirq_irqoff(TASKLET_SOFTIRQ);
730
731	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
732		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
733		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
734		per_cpu(tasklet_hi_vec, cpu).head = NULL;
735		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
736	}
737	raise_softirq_irqoff(HI_SOFTIRQ);
738
739	local_irq_enable();
740	return 0;
741}
742#else
743#define takeover_tasklets	NULL
744#endif /* CONFIG_HOTPLUG_CPU */
745
746static struct smp_hotplug_thread softirq_threads = {
747	.store			= &ksoftirqd,
748	.thread_should_run	= ksoftirqd_should_run,
749	.thread_fn		= run_ksoftirqd,
750	.thread_comm		= "ksoftirqd/%u",
751};
752
753static __init int spawn_ksoftirqd(void)
754{
755	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
756				  takeover_tasklets);
757	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
758
759	return 0;
760}
761early_initcall(spawn_ksoftirqd);
762
763/*
764 * [ These __weak aliases are kept in a separate compilation unit, so that
765 *   GCC does not inline them incorrectly. ]
766 */
767
768int __init __weak early_irq_init(void)
769{
770	return 0;
771}
772
773int __init __weak arch_probe_nr_irqs(void)
774{
775	return NR_IRQS_LEGACY;
776}
777
778int __init __weak arch_early_irq_init(void)
779{
780	return 0;
781}
782
783unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
784{
785	return from;
786}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/kernel/softirq.c
   4 *
   5 *	Copyright (C) 1992 Linus Torvalds
   6 *
   7 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/export.h>
  13#include <linux/kernel_stat.h>
  14#include <linux/interrupt.h>
  15#include <linux/init.h>
  16#include <linux/local_lock.h>
  17#include <linux/mm.h>
  18#include <linux/notifier.h>
  19#include <linux/percpu.h>
  20#include <linux/cpu.h>
  21#include <linux/freezer.h>
  22#include <linux/kthread.h>
  23#include <linux/rcupdate.h>
  24#include <linux/ftrace.h>
  25#include <linux/smp.h>
  26#include <linux/smpboot.h>
  27#include <linux/tick.h>
  28#include <linux/irq.h>
  29#include <linux/wait_bit.h>
  30#include <linux/workqueue.h>
  31
  32#include <asm/softirq_stack.h>
  33
  34#define CREATE_TRACE_POINTS
  35#include <trace/events/irq.h>
  36
  37/*
  38   - No shared variables, all the data are CPU local.
  39   - If a softirq needs serialization, let it serialize itself
  40     by its own spinlocks.
  41   - Even if softirq is serialized, only local cpu is marked for
  42     execution. Hence, we get something sort of weak cpu binding.
  43     Though it is still not clear whether it will result in better
  44     locality or not.
  45
  46   Examples:
  47   - NET RX softirq. It is multithreaded and does not require
  48     any global serialization.
  49   - NET TX softirq. It kicks software netdevice queues, hence
  50     it is logically serialized per device, but this serialization
  51     is invisible to common code.
  52   - Tasklets: serialized wrt itself.
  53 */
  54
  55#ifndef __ARCH_IRQ_STAT
  56DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
  57EXPORT_PER_CPU_SYMBOL(irq_stat);
  58#endif
  59
  60static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
  61
  62DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
  63
  64const char * const softirq_to_name[NR_SOFTIRQS] = {
  65	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
  66	"TASKLET", "SCHED", "HRTIMER", "RCU"
  67};
  68
  69/*
  70 * we cannot loop indefinitely here to avoid userspace starvation,
  71 * but we also don't want to introduce a worst case 1/HZ latency
  72 * to the pending events, so let the scheduler balance
  73 * the softirq load for us.
  74 */
  75static void wakeup_softirqd(void)
  76{
  77	/* Interrupts are disabled: no need to stop preemption */
  78	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
  79
  80	if (tsk)
  81		wake_up_process(tsk);
  82}
  83
  84#ifdef CONFIG_TRACE_IRQFLAGS
  85DEFINE_PER_CPU(int, hardirqs_enabled);
  86DEFINE_PER_CPU(int, hardirq_context);
  87EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
  88EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
  89#endif
  90
  91/*
  92 * SOFTIRQ_OFFSET usage:
  93 *
  94 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
  95 * to a per CPU counter and to task::softirqs_disabled_cnt.
  96 *
  97 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
  98 *   processing.
  99 *
 100 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 101 *   on local_bh_disable or local_bh_enable.
 102 *
 103 * This lets us distinguish between whether we are currently processing
 104 * softirq and whether we just have bh disabled.
 105 */
 106#ifdef CONFIG_PREEMPT_RT
 107
 108/*
 109 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
 110 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 111 * softirq disabled section to be preempted.
 112 *
 113 * The per task counter is used for softirq_count(), in_softirq() and
 114 * in_serving_softirqs() because these counts are only valid when the task
 115 * holding softirq_ctrl::lock is running.
 116 *
 117 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 118 * the task which is in a softirq disabled section is preempted or blocks.
 119 */
 120struct softirq_ctrl {
 121	local_lock_t	lock;
 122	int		cnt;
 123};
 124
 125static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
 126	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
 127};
 128
 129/**
 130 * local_bh_blocked() - Check for idle whether BH processing is blocked
 131 *
 132 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 133 *
 134 * This is invoked from the idle task to guard against false positive
 135 * softirq pending warnings, which would happen when the task which holds
 136 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 137 * some other lock.
 138 */
 139bool local_bh_blocked(void)
 140{
 141	return __this_cpu_read(softirq_ctrl.cnt) != 0;
 142}
 143
 144void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 145{
 146	unsigned long flags;
 147	int newcnt;
 148
 149	WARN_ON_ONCE(in_hardirq());
 150
 151	/* First entry of a task into a BH disabled section? */
 152	if (!current->softirq_disable_cnt) {
 153		if (preemptible()) {
 154			local_lock(&softirq_ctrl.lock);
 155			/* Required to meet the RCU bottomhalf requirements. */
 156			rcu_read_lock();
 157		} else {
 158			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
 159		}
 160	}
 161
 162	/*
 163	 * Track the per CPU softirq disabled state. On RT this is per CPU
 164	 * state to allow preemption of bottom half disabled sections.
 165	 */
 166	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
 167	/*
 168	 * Reflect the result in the task state to prevent recursion on the
 169	 * local lock and to make softirq_count() & al work.
 170	 */
 171	current->softirq_disable_cnt = newcnt;
 172
 173	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
 174		raw_local_irq_save(flags);
 175		lockdep_softirqs_off(ip);
 176		raw_local_irq_restore(flags);
 177	}
 178}
 179EXPORT_SYMBOL(__local_bh_disable_ip);
 180
 181static void __local_bh_enable(unsigned int cnt, bool unlock)
 182{
 183	unsigned long flags;
 184	int newcnt;
 185
 186	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
 187			    this_cpu_read(softirq_ctrl.cnt));
 188
 189	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
 190		raw_local_irq_save(flags);
 191		lockdep_softirqs_on(_RET_IP_);
 192		raw_local_irq_restore(flags);
 193	}
 194
 195	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
 196	current->softirq_disable_cnt = newcnt;
 197
 198	if (!newcnt && unlock) {
 199		rcu_read_unlock();
 200		local_unlock(&softirq_ctrl.lock);
 201	}
 202}
 203
 204void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 205{
 206	bool preempt_on = preemptible();
 207	unsigned long flags;
 208	u32 pending;
 209	int curcnt;
 210
 211	WARN_ON_ONCE(in_hardirq());
 212	lockdep_assert_irqs_enabled();
 213
 214	local_irq_save(flags);
 215	curcnt = __this_cpu_read(softirq_ctrl.cnt);
 216
 217	/*
 218	 * If this is not reenabling soft interrupts, no point in trying to
 219	 * run pending ones.
 220	 */
 221	if (curcnt != cnt)
 222		goto out;
 223
 224	pending = local_softirq_pending();
 225	if (!pending)
 226		goto out;
 227
 228	/*
 229	 * If this was called from non preemptible context, wake up the
 230	 * softirq daemon.
 231	 */
 232	if (!preempt_on) {
 233		wakeup_softirqd();
 234		goto out;
 235	}
 236
 237	/*
 238	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
 239	 * in_serving_softirq() become true.
 240	 */
 241	cnt = SOFTIRQ_OFFSET;
 242	__local_bh_enable(cnt, false);
 243	__do_softirq();
 244
 245out:
 246	__local_bh_enable(cnt, preempt_on);
 247	local_irq_restore(flags);
 248}
 249EXPORT_SYMBOL(__local_bh_enable_ip);
 250
 251/*
 252 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 253 * to acquire the per CPU local lock for reentrancy protection.
 254 */
 255static inline void ksoftirqd_run_begin(void)
 256{
 257	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 258	local_irq_disable();
 259}
 260
 261/* Counterpart to ksoftirqd_run_begin() */
 262static inline void ksoftirqd_run_end(void)
 263{
 264	__local_bh_enable(SOFTIRQ_OFFSET, true);
 265	WARN_ON_ONCE(in_interrupt());
 266	local_irq_enable();
 267}
 268
 269static inline void softirq_handle_begin(void) { }
 270static inline void softirq_handle_end(void) { }
 271
 272static inline bool should_wake_ksoftirqd(void)
 273{
 274	return !this_cpu_read(softirq_ctrl.cnt);
 275}
 276
 277static inline void invoke_softirq(void)
 278{
 279	if (should_wake_ksoftirqd())
 280		wakeup_softirqd();
 281}
 282
 283/*
 284 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 285 * call. On RT kernels this is undesired and the only known functionality
 286 * in the block layer which does this is disabled on RT. If soft interrupts
 287 * get raised which haven't been raised before the flush, warn so it can be
 288 * investigated.
 289 */
 290void do_softirq_post_smp_call_flush(unsigned int was_pending)
 291{
 292	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
 293		invoke_softirq();
 294}
 295
 296#else /* CONFIG_PREEMPT_RT */
 297
 298/*
 299 * This one is for softirq.c-internal use, where hardirqs are disabled
 300 * legitimately:
 301 */
 302#ifdef CONFIG_TRACE_IRQFLAGS
 303void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 304{
 305	unsigned long flags;
 306
 307	WARN_ON_ONCE(in_hardirq());
 308
 309	raw_local_irq_save(flags);
 310	/*
 311	 * The preempt tracer hooks into preempt_count_add and will break
 312	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
 313	 * is set and before current->softirq_enabled is cleared.
 314	 * We must manually increment preempt_count here and manually
 315	 * call the trace_preempt_off later.
 316	 */
 317	__preempt_count_add(cnt);
 318	/*
 319	 * Were softirqs turned off above:
 320	 */
 321	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 322		lockdep_softirqs_off(ip);
 323	raw_local_irq_restore(flags);
 324
 325	if (preempt_count() == cnt) {
 326#ifdef CONFIG_DEBUG_PREEMPT
 327		current->preempt_disable_ip = get_lock_parent_ip();
 328#endif
 329		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 330	}
 331}
 332EXPORT_SYMBOL(__local_bh_disable_ip);
 333#endif /* CONFIG_TRACE_IRQFLAGS */
 334
 335static void __local_bh_enable(unsigned int cnt)
 336{
 337	lockdep_assert_irqs_disabled();
 338
 339	if (preempt_count() == cnt)
 340		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 341
 342	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 343		lockdep_softirqs_on(_RET_IP_);
 344
 345	__preempt_count_sub(cnt);
 346}
 347
 348/*
 349 * Special-case - softirqs can safely be enabled by __do_softirq(),
 350 * without processing still-pending softirqs:
 351 */
 352void _local_bh_enable(void)
 353{
 354	WARN_ON_ONCE(in_hardirq());
 355	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 356}
 357EXPORT_SYMBOL(_local_bh_enable);
 358
 359void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 360{
 361	WARN_ON_ONCE(in_hardirq());
 362	lockdep_assert_irqs_enabled();
 363#ifdef CONFIG_TRACE_IRQFLAGS
 364	local_irq_disable();
 365#endif
 366	/*
 367	 * Are softirqs going to be turned on now:
 368	 */
 369	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
 370		lockdep_softirqs_on(ip);
 371	/*
 372	 * Keep preemption disabled until we are done with
 373	 * softirq processing:
 374	 */
 375	__preempt_count_sub(cnt - 1);
 376
 377	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 378		/*
 379		 * Run softirq if any pending. And do it in its own stack
 380		 * as we may be calling this deep in a task call stack already.
 381		 */
 382		do_softirq();
 383	}
 384
 385	preempt_count_dec();
 386#ifdef CONFIG_TRACE_IRQFLAGS
 387	local_irq_enable();
 388#endif
 389	preempt_check_resched();
 390}
 391EXPORT_SYMBOL(__local_bh_enable_ip);
 392
 393static inline void softirq_handle_begin(void)
 394{
 395	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 396}
 397
 398static inline void softirq_handle_end(void)
 399{
 400	__local_bh_enable(SOFTIRQ_OFFSET);
 401	WARN_ON_ONCE(in_interrupt());
 402}
 403
 404static inline void ksoftirqd_run_begin(void)
 405{
 406	local_irq_disable();
 407}
 408
 409static inline void ksoftirqd_run_end(void)
 410{
 411	local_irq_enable();
 412}
 413
 414static inline bool should_wake_ksoftirqd(void)
 415{
 416	return true;
 417}
 418
 419static inline void invoke_softirq(void)
 420{
 421	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 422#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 423		/*
 424		 * We can safely execute softirq on the current stack if
 425		 * it is the irq stack, because it should be near empty
 426		 * at this stage.
 427		 */
 428		__do_softirq();
 429#else
 430		/*
 431		 * Otherwise, irq_exit() is called on the task stack that can
 432		 * be potentially deep already. So call softirq in its own stack
 433		 * to prevent from any overrun.
 434		 */
 435		do_softirq_own_stack();
 436#endif
 437	} else {
 438		wakeup_softirqd();
 439	}
 440}
 441
 442asmlinkage __visible void do_softirq(void)
 443{
 444	__u32 pending;
 445	unsigned long flags;
 446
 447	if (in_interrupt())
 448		return;
 449
 450	local_irq_save(flags);
 451
 452	pending = local_softirq_pending();
 453
 454	if (pending)
 455		do_softirq_own_stack();
 456
 457	local_irq_restore(flags);
 458}
 459
 460#endif /* !CONFIG_PREEMPT_RT */
 461
 462/*
 463 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 464 * but break the loop if need_resched() is set or after 2 ms.
 465 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 466 * certain cases, such as stop_machine(), jiffies may cease to
 467 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 468 * well to make sure we eventually return from this method.
 469 *
 470 * These limits have been established via experimentation.
 471 * The two things to balance are latency against fairness -
 472 * we want to handle softirqs as soon as possible, but they
 473 * should not be able to lock up the box.
 474 */
 475#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
 476#define MAX_SOFTIRQ_RESTART 10
 477
 478#ifdef CONFIG_TRACE_IRQFLAGS
 479/*
 480 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 481 * to keep the lockdep irq context tracking as tight as possible in order to
 482 * not mis-qualify lock contexts and miss possible deadlocks.
 483 */
 484
 485static inline bool lockdep_softirq_start(void)
 486{
 487	bool in_hardirq = false;
 488
 489	if (lockdep_hardirq_context()) {
 490		in_hardirq = true;
 491		lockdep_hardirq_exit();
 492	}
 493
 494	lockdep_softirq_enter();
 495
 496	return in_hardirq;
 497}
 498
 499static inline void lockdep_softirq_end(bool in_hardirq)
 500{
 501	lockdep_softirq_exit();
 502
 503	if (in_hardirq)
 504		lockdep_hardirq_enter();
 505}
 506#else
 507static inline bool lockdep_softirq_start(void) { return false; }
 508static inline void lockdep_softirq_end(bool in_hardirq) { }
 509#endif
 510
 511static void handle_softirqs(bool ksirqd)
 512{
 513	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 514	unsigned long old_flags = current->flags;
 515	int max_restart = MAX_SOFTIRQ_RESTART;
 516	struct softirq_action *h;
 517	bool in_hardirq;
 518	__u32 pending;
 519	int softirq_bit;
 520
 521	/*
 522	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
 523	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
 524	 * again if the socket is related to swapping.
 525	 */
 526	current->flags &= ~PF_MEMALLOC;
 527
 528	pending = local_softirq_pending();
 529
 530	softirq_handle_begin();
 531	in_hardirq = lockdep_softirq_start();
 532	account_softirq_enter(current);
 533
 534restart:
 535	/* Reset the pending bitmask before enabling irqs */
 536	set_softirq_pending(0);
 537
 538	local_irq_enable();
 539
 540	h = softirq_vec;
 541
 542	while ((softirq_bit = ffs(pending))) {
 543		unsigned int vec_nr;
 544		int prev_count;
 545
 546		h += softirq_bit - 1;
 547
 548		vec_nr = h - softirq_vec;
 549		prev_count = preempt_count();
 550
 551		kstat_incr_softirqs_this_cpu(vec_nr);
 552
 553		trace_softirq_entry(vec_nr);
 554		h->action(h);
 555		trace_softirq_exit(vec_nr);
 556		if (unlikely(prev_count != preempt_count())) {
 557			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
 558			       vec_nr, softirq_to_name[vec_nr], h->action,
 559			       prev_count, preempt_count());
 560			preempt_count_set(prev_count);
 561		}
 562		h++;
 563		pending >>= softirq_bit;
 564	}
 565
 566	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
 567		rcu_softirq_qs();
 568
 569	local_irq_disable();
 570
 571	pending = local_softirq_pending();
 572	if (pending) {
 573		if (time_before(jiffies, end) && !need_resched() &&
 574		    --max_restart)
 575			goto restart;
 576
 577		wakeup_softirqd();
 578	}
 579
 580	account_softirq_exit(current);
 581	lockdep_softirq_end(in_hardirq);
 582	softirq_handle_end();
 583	current_restore_flags(old_flags, PF_MEMALLOC);
 584}
 585
 586asmlinkage __visible void __softirq_entry __do_softirq(void)
 587{
 588	handle_softirqs(false);
 589}
 590
 591/**
 592 * irq_enter_rcu - Enter an interrupt context with RCU watching
 593 */
 594void irq_enter_rcu(void)
 595{
 596	__irq_enter_raw();
 597
 598	if (tick_nohz_full_cpu(smp_processor_id()) ||
 599	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
 600		tick_irq_enter();
 601
 602	account_hardirq_enter(current);
 603}
 604
 605/**
 606 * irq_enter - Enter an interrupt context including RCU update
 607 */
 608void irq_enter(void)
 609{
 610	ct_irq_enter();
 611	irq_enter_rcu();
 612}
 613
 614static inline void tick_irq_exit(void)
 615{
 616#ifdef CONFIG_NO_HZ_COMMON
 617	int cpu = smp_processor_id();
 618
 619	/* Make sure that timer wheel updates are propagated */
 620	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
 621		if (!in_hardirq())
 622			tick_nohz_irq_exit();
 623	}
 624#endif
 625}
 626
 627static inline void __irq_exit_rcu(void)
 628{
 629#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
 630	local_irq_disable();
 631#else
 632	lockdep_assert_irqs_disabled();
 633#endif
 634	account_hardirq_exit(current);
 635	preempt_count_sub(HARDIRQ_OFFSET);
 636	if (!in_interrupt() && local_softirq_pending())
 637		invoke_softirq();
 638
 639	tick_irq_exit();
 640}
 641
 642/**
 643 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 644 *
 645 * Also processes softirqs if needed and possible.
 646 */
 647void irq_exit_rcu(void)
 648{
 649	__irq_exit_rcu();
 650	 /* must be last! */
 651	lockdep_hardirq_exit();
 652}
 653
 654/**
 655 * irq_exit - Exit an interrupt context, update RCU and lockdep
 656 *
 657 * Also processes softirqs if needed and possible.
 658 */
 659void irq_exit(void)
 660{
 661	__irq_exit_rcu();
 662	ct_irq_exit();
 663	 /* must be last! */
 664	lockdep_hardirq_exit();
 665}
 666
 667/*
 668 * This function must run with irqs disabled!
 669 */
 670inline void raise_softirq_irqoff(unsigned int nr)
 671{
 672	__raise_softirq_irqoff(nr);
 673
 674	/*
 675	 * If we're in an interrupt or softirq, we're done
 676	 * (this also catches softirq-disabled code). We will
 677	 * actually run the softirq once we return from
 678	 * the irq or softirq.
 679	 *
 680	 * Otherwise we wake up ksoftirqd to make sure we
 681	 * schedule the softirq soon.
 682	 */
 683	if (!in_interrupt() && should_wake_ksoftirqd())
 684		wakeup_softirqd();
 685}
 686
 687void raise_softirq(unsigned int nr)
 688{
 689	unsigned long flags;
 690
 691	local_irq_save(flags);
 692	raise_softirq_irqoff(nr);
 693	local_irq_restore(flags);
 694}
 695
 696void __raise_softirq_irqoff(unsigned int nr)
 697{
 698	lockdep_assert_irqs_disabled();
 699	trace_softirq_raise(nr);
 700	or_softirq_pending(1UL << nr);
 701}
 702
 703void open_softirq(int nr, void (*action)(struct softirq_action *))
 704{
 705	softirq_vec[nr].action = action;
 706}
 707
 708/*
 709 * Tasklets
 710 */
 711struct tasklet_head {
 712	struct tasklet_struct *head;
 713	struct tasklet_struct **tail;
 714};
 715
 716static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 717static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 718
 719static void __tasklet_schedule_common(struct tasklet_struct *t,
 720				      struct tasklet_head __percpu *headp,
 721				      unsigned int softirq_nr)
 722{
 723	struct tasklet_head *head;
 724	unsigned long flags;
 725
 726	local_irq_save(flags);
 727	head = this_cpu_ptr(headp);
 728	t->next = NULL;
 729	*head->tail = t;
 730	head->tail = &(t->next);
 731	raise_softirq_irqoff(softirq_nr);
 732	local_irq_restore(flags);
 733}
 734
 735void __tasklet_schedule(struct tasklet_struct *t)
 736{
 737	__tasklet_schedule_common(t, &tasklet_vec,
 738				  TASKLET_SOFTIRQ);
 739}
 740EXPORT_SYMBOL(__tasklet_schedule);
 741
 742void __tasklet_hi_schedule(struct tasklet_struct *t)
 743{
 744	__tasklet_schedule_common(t, &tasklet_hi_vec,
 745				  HI_SOFTIRQ);
 746}
 747EXPORT_SYMBOL(__tasklet_hi_schedule);
 748
 749static bool tasklet_clear_sched(struct tasklet_struct *t)
 750{
 751	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
 752		wake_up_var(&t->state);
 753		return true;
 754	}
 755
 756	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
 757		  t->use_callback ? "callback" : "func",
 758		  t->use_callback ? (void *)t->callback : (void *)t->func);
 759
 760	return false;
 761}
 762
 763static void tasklet_action_common(struct softirq_action *a,
 764				  struct tasklet_head *tl_head,
 765				  unsigned int softirq_nr)
 766{
 767	struct tasklet_struct *list;
 768
 769	local_irq_disable();
 770	list = tl_head->head;
 771	tl_head->head = NULL;
 772	tl_head->tail = &tl_head->head;
 773	local_irq_enable();
 774
 775	while (list) {
 776		struct tasklet_struct *t = list;
 777
 778		list = list->next;
 779
 780		if (tasklet_trylock(t)) {
 781			if (!atomic_read(&t->count)) {
 782				if (tasklet_clear_sched(t)) {
 783					if (t->use_callback) {
 784						trace_tasklet_entry(t, t->callback);
 785						t->callback(t);
 786						trace_tasklet_exit(t, t->callback);
 787					} else {
 788						trace_tasklet_entry(t, t->func);
 789						t->func(t->data);
 790						trace_tasklet_exit(t, t->func);
 791					}
 792				}
 793				tasklet_unlock(t);
 794				continue;
 795			}
 796			tasklet_unlock(t);
 797		}
 798
 799		local_irq_disable();
 800		t->next = NULL;
 801		*tl_head->tail = t;
 802		tl_head->tail = &t->next;
 803		__raise_softirq_irqoff(softirq_nr);
 804		local_irq_enable();
 805	}
 806}
 807
 808static __latent_entropy void tasklet_action(struct softirq_action *a)
 809{
 810	workqueue_softirq_action(false);
 811	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
 812}
 813
 814static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 815{
 816	workqueue_softirq_action(true);
 817	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
 818}
 819
 820void tasklet_setup(struct tasklet_struct *t,
 821		   void (*callback)(struct tasklet_struct *))
 822{
 823	t->next = NULL;
 824	t->state = 0;
 825	atomic_set(&t->count, 0);
 826	t->callback = callback;
 827	t->use_callback = true;
 828	t->data = 0;
 829}
 830EXPORT_SYMBOL(tasklet_setup);
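/*
 * Sketch of the callback-style interface above: the tasklet is embedded in
 * a driver structure and recovered with from_tasklet(), a container_of()
 * wrapper from <linux/interrupt.h>. foo_dev and foo_tasklet_fn() are
 * illustrative names.
 */
#if 0	/* example only */
struct foo_dev {
	struct tasklet_struct tasklet;
	/* ... device state ... */
};

static void foo_tasklet_fn(struct tasklet_struct *t)
{
	struct foo_dev *dev = from_tasklet(dev, t, tasklet);

	/* bottom-half work for dev, in softirq context */
}

static void foo_setup(struct foo_dev *dev)
{
	tasklet_setup(&dev->tasklet, foo_tasklet_fn);
}
#endif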
 831
 832void tasklet_init(struct tasklet_struct *t,
 833		  void (*func)(unsigned long), unsigned long data)
 834{
 835	t->next = NULL;
 836	t->state = 0;
 837	atomic_set(&t->count, 0);
 838	t->func = func;
 839	t->use_callback = false;
 840	t->data = data;
 841}
 842EXPORT_SYMBOL(tasklet_init);
 843
 844#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 845/*
 846 * Do not use in new code. Waiting for tasklets from atomic contexts is
 847 * error prone and should be avoided.
 848 */
 849void tasklet_unlock_spin_wait(struct tasklet_struct *t)
 850{
 851	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
 852		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
 853			/*
 854			 * Prevent a live lock when the current task has preempted
 855			 * soft interrupt processing or prevents ksoftirqd from
 856			 * running. If the tasklet runs on a different CPU
 857			 * then this has no effect other than doing the BH
 858			 * disable/enable dance for nothing.
 859			 */
 860			local_bh_disable();
 861			local_bh_enable();
 862		} else {
 863			cpu_relax();
 864		}
 865	}
 866}
 867EXPORT_SYMBOL(tasklet_unlock_spin_wait);
 868#endif
 869
 870void tasklet_kill(struct tasklet_struct *t)
 871{
 872	if (in_interrupt())
 873		pr_notice("Attempt to kill tasklet from interrupt\n");
 874
 875	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 876		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
 877
 878	tasklet_unlock_wait(t);
 879	tasklet_clear_sched(t);
 880}
 881EXPORT_SYMBOL(tasklet_kill);
 882
 883#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 884void tasklet_unlock(struct tasklet_struct *t)
 885{
 886	smp_mb__before_atomic();
 887	clear_bit(TASKLET_STATE_RUN, &t->state);
 888	smp_mb__after_atomic();
 889	wake_up_var(&t->state);
 890}
 891EXPORT_SYMBOL_GPL(tasklet_unlock);
 892
 893void tasklet_unlock_wait(struct tasklet_struct *t)
 894{
 895	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
 896}
 897EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
 898#endif
 899
 900void __init softirq_init(void)
 901{
 902	int cpu;
 903
 904	for_each_possible_cpu(cpu) {
 905		per_cpu(tasklet_vec, cpu).tail =
 906			&per_cpu(tasklet_vec, cpu).head;
 907		per_cpu(tasklet_hi_vec, cpu).tail =
 908			&per_cpu(tasklet_hi_vec, cpu).head;
 909	}
 910
 911	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 912	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 913}
 914
 915static int ksoftirqd_should_run(unsigned int cpu)
 916{
 917	return local_softirq_pending();
 918}
 919
 920static void run_ksoftirqd(unsigned int cpu)
 921{
 922	ksoftirqd_run_begin();
 923	if (local_softirq_pending()) {
 924		/*
 925		 * We can safely run softirq on inline stack, as we are not deep
 926		 * in the task stack here.
 927		 */
 928		handle_softirqs(true);
 929		ksoftirqd_run_end();
 930		cond_resched();
 931		return;
 932	}
 933	ksoftirqd_run_end();
 934}
 935
 936#ifdef CONFIG_HOTPLUG_CPU
 937static int takeover_tasklets(unsigned int cpu)
 938{
 939	workqueue_softirq_dead(cpu);
 940
 941	/* CPU is dead, so no lock needed. */
 942	local_irq_disable();
 943
 944	/* Find end, append list for that CPU. */
 945	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
 946		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
 947		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 948		per_cpu(tasklet_vec, cpu).head = NULL;
 949		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 950	}
 951	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 952
 953	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
 954		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
 955		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 956		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 957		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 958	}
 959	raise_softirq_irqoff(HI_SOFTIRQ);
 960
 961	local_irq_enable();
 962	return 0;
 963}
 964#else
 965#define takeover_tasklets	NULL
 966#endif /* CONFIG_HOTPLUG_CPU */
 967
 968static struct smp_hotplug_thread softirq_threads = {
 969	.store			= &ksoftirqd,
 970	.thread_should_run	= ksoftirqd_should_run,
 971	.thread_fn		= run_ksoftirqd,
 972	.thread_comm		= "ksoftirqd/%u",
 973};
 974
 975static __init int spawn_ksoftirqd(void)
 976{
 977	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
 978				  takeover_tasklets);
 979	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 980
 981	return 0;
 982}
 983early_initcall(spawn_ksoftirqd);
 984
 985/*
 986 * [ These __weak aliases are kept in a separate compilation unit, so that
 987 *   GCC does not inline them incorrectly. ]
 988 */
 989
 990int __init __weak early_irq_init(void)
 991{
 992	return 0;
 993}
 994
 995int __init __weak arch_probe_nr_irqs(void)
 996{
 997	return NR_IRQS_LEGACY;
 998}
 999
1000int __init __weak arch_early_irq_init(void)
1001{
1002	return 0;
1003}
1004
1005unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1006{
1007	return from;
1008}