v3.15 (kernel/irq_work.c)

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, irq_work_list);
static DEFINE_PER_CPU(int, irq_work_raised);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
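
For reference, an arch override replaces this weak stub. Below is a sketch modeled on x86's arch/x86/kernel/irq_work.c of this era (reconstructed, so details such as the cpu_has_apic check may differ slightly); it raises a self-IPI on the local APIC so the callbacks run from a real hardirq instead of waiting for the tick:

void arch_irq_work_raise(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	/* No APIC, no IPI: fall back to the timer-tick path above. */
	if (!cpu_has_apic)
		return;

	apic->send_IPI_self(IRQ_WORK_VECTOR);
	apic_wait_icr_idle();
#endif
}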

/*
 * Enqueue the irq_work @entry unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

	/*
	 * If the work is not "lazy" or the tick is stopped, raise the irq
	 * work interrupt (if supported by the arch); otherwise, just wait
	 * for the next tick.
	 */
	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
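
For orientation, a minimal usage sketch against this v3.15 API. The names my_irq_work_func, my_work, my_setup and my_trigger_from_nmi are hypothetical; init_irq_work() and irq_work_queue() come from <linux/irq_work.h>:

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Hypothetical callback: deferred out of NMI, runs in hardirq context. */
static void my_irq_work_func(struct irq_work *work)
{
	pr_info("irq_work callback ran\n");
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_irq_work_func);
}

/* NMI-safe: claims the entry, queues it, raises the irq_work IPI. */
static void my_trigger_from_nmi(void)
{
	irq_work_queue(&my_work);
}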

bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
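
A hedged teardown sketch: before freeing memory that embeds a struct irq_work, the owner must wait for any in-flight callback. struct my_obj and my_obj_destroy() are hypothetical:

#include <linux/irq_work.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical container */
	struct irq_work work;
	int payload;
};

static void my_obj_destroy(struct my_obj *obj)
{
	/* Spins until the BUSY bit clears; IRQs must be enabled here. */
	irq_work_sync(&obj->work);
	kfree(obj);
}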

#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */

v5.14.15 (kernel/irq_work.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <linux/kasan.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;

	return true;
}
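
A standalone C11 sketch of the claim, not kernel code: a single atomic fetch-or sets both bits at once, and the returned old value tells the caller whether another party already owned the entry. The bit values mirror IRQ_WORK_PENDING and IRQ_WORK_BUSY; the function itself is illustrative:

#include <stdatomic.h>
#include <stdbool.h>

#define PENDING	0x01	/* mirrors IRQ_WORK_PENDING */
#define BUSY	0x02	/* mirrors IRQ_WORK_BUSY */

/* Illustrative model of irq_work_claim(): one atomic read-modify-write. */
static bool claim(atomic_int *flags)
{
	int old = atomic_fetch_or(flags, PENDING | BUSY);

	/* PENDING already set means another caller owns the entry. */
	return !(old & PENDING);
}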

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
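
A comparable usage sketch for this v5.14 API; my_cb and my_trigger are hypothetical names, while IRQ_WORK_INIT() and irq_work_queue() come from <linux/irq_work.h>:

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical callback; runs from the irq_work interrupt or the tick. */
static void my_cb(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_work = IRQ_WORK_INIT(my_cb);

/* NMI-safe: one fetch_or claims the entry, then llist_add and the IPI. */
static void my_trigger(void)
{
	irq_work_queue(&my_work);
}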

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	kasan_record_aux_stack(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
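
A sketch of the remote variant, reusing the hypothetical my_cb from above; note the WARN_ON_ONCE(in_nmi()) in the function: unlike irq_work_queue(), the cross-CPU path must not be used from NMI context:

static struct irq_work remote_work = IRQ_WORK_INIT(my_cb);

/* Run my_cb on @target_cpu; returns false if already pending elsewhere. */
static bool kick_remote(int target_cpu)
{
	return irq_work_queue_on(&remote_work, target_cpu);
}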

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work can be re-used.
	 * The PENDING bit acts as a lock, and we own it, so we can clear it
	 * without atomic ops.
	 */
	flags = atomic_read(&work->node.a_flags);
	flags &= ~IRQ_WORK_PENDING;
	atomic_set(&work->node.a_flags, flags);

	/*
	 * See irq_work_claim().
	 */
	smp_mb();

	lockdep_irq_work_enter(flags);
	work->func(work);
	lockdep_irq_work_exit(flags);

	/*
	 * Clear the BUSY bit, if set, and return to the free state if no-one
	 * else claimed it meanwhile.
	 */
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
}
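
Continuing the illustrative C11 model from irq_work_claim() above: the runner clears PENDING with a plain store (it owns the bit), issues a full fence that pairs with the fetch-or in claim(), runs the callback, then drops BUSY only if nobody re-claimed the entry meanwhile. A sketch, not the kernel's code:

/* Illustrative model of irq_work_single(), paired with claim() above. */
static void run_one(atomic_int *flags, void (*func)(void))
{
	/* We own PENDING, so a non-RMW store suffices to clear it. */
	int f = atomic_load(flags) & ~PENDING;
	atomic_store(flags, f);

	/* Full fence: pairs with the fetch-or in claim(), so data written
	 * by the enqueuer before claiming is visible to func(). */
	atomic_thread_fence(memory_order_seq_cst);

	func();

	/* Return to the free state unless the entry was re-claimed while
	 * func() ran (the compare-exchange then simply fails). */
	atomic_compare_exchange_strong(flags, &f, f & ~BUSY);
}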

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @entry; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);