/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING        1UL
#define IRQ_WORK_BUSY           2UL
#define IRQ_WORK_FLAGS          3UL
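
/*
 * Editorial illustration (not part of the original file): the state
 * encoding above, traced through one enqueue/run round as a minimal
 * user-space model. GCC's __sync_val_compare_and_swap() stands in for
 * the kernel's cmpxchg(); demo_flags is a hypothetical name.
 */
#if 0
#include <assert.h>

static unsigned long demo_flags;        /* stands in for work->flags */

int main(void)
{
        /* free -> claimed: 0 -> 3 */
        assert(__sync_val_compare_and_swap(&demo_flags, 0UL, IRQ_WORK_FLAGS) == 0UL);
        /* pending -> busy: the runner drops PENDING before the callback */
        demo_flags = IRQ_WORK_BUSY;
        /* busy -> free: 2 -> 0, fails if a new claimer got in first */
        assert(__sync_val_compare_and_swap(&demo_flags, IRQ_WORK_BUSY, 0UL) == IRQ_WORK_BUSY);
        return 0;
}
#endif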

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, nflags;

        for (;;) {
                flags = work->flags;
                if (flags & IRQ_WORK_PENDING)
                        return false;
                nflags = flags | IRQ_WORK_FLAGS;
                if (cmpxchg(&work->flags, flags, nflags) == flags)
                        break;
                cpu_relax();
        }

        return true;
}
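
/*
 * Editorial illustration (not part of the original file): two CPUs race
 * to claim a free entry (flags == 0):
 *
 *      CPU0: cmpxchg(0 -> 3) succeeds, irq_work_claim() returns true
 *      CPU1: re-reads flags == 3, sees IRQ_WORK_PENDING, returns false
 *
 * A busy entry (flags == 2) has PENDING clear, so cmpxchg(2 -> 3) can
 * still succeed: that is what allows re-enqueueing a work item whose
 * callback is currently running.
 */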

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}
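
/*
 * Editorial sketch (not part of the original file): the kind of
 * override an architecture supplies instead of the weak stub above,
 * loosely modeled on x86's self-IPI. The exact APIC calls vary by
 * kernel version, so treat this as an assumption, not the real code.
 */
#if 0
void arch_irq_work_raise(void)
{
        /* Interrupt ourselves so irq_work_run() gets called promptly. */
        apic->send_IPI_self(IRQ_WORK_VECTOR);
        apic_wait_icr_idle();
}
#endif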

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
        bool empty;

        preempt_disable();

        empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
        /* The list was empty, raise self-interrupt to start processing. */
        if (empty)
                arch_irq_work_raise();

        preempt_enable();
}
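
/*
 * Editorial illustration (not part of the original file): llist_add()
 * returns true only when the list was empty beforehand, so only the
 * first enqueuer on an idle list pays for the self-interrupt; later
 * entries piggyback on the already-raised one. Hypothetical names:
 */
#if 0
        static LLIST_HEAD(demo_list);
        struct llist_node n1, n2;

        BUG_ON(!llist_add(&n1, &demo_list));    /* was empty: returns true */
        BUG_ON(llist_add(&n2, &demo_list));     /* now non-empty: returns false */
#endif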

/*
 * Enqueue the irq_work @work. Returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
        if (!irq_work_claim(work)) {
                /*
                 * Already enqueued, can't do!
                 */
                return false;
        }

        __irq_work_queue(work);
        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
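
/*
 * Editorial usage sketch (not part of the original file): a typical
 * caller defers real work out of NMI context. demo_func, demo_work and
 * demo_nmi_handler are hypothetical names.
 */
#if 0
static void demo_func(struct irq_work *w)
{
        pr_info("running in hardirq context\n");
}

static struct irq_work demo_work = { .func = demo_func };

static void demo_nmi_handler(void)
{
        if (!irq_work_queue(&demo_work))
                return;         /* already pending; the queued run covers us */
}
#endif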

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
        struct irq_work *work;
        struct llist_head *this_list;
        struct llist_node *llnode;

        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return;

        BUG_ON(!in_irq());
        BUG_ON(!irqs_disabled());

        llnode = llist_del_all(this_list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);

                llnode = llist_next(llnode);

                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 */
                work->flags = IRQ_WORK_BUSY;
                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
        }
}
EXPORT_SYMBOL_GPL(irq_work_run);
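
/*
 * Editorial sketch (not part of the original file): because PENDING is
 * dropped before the callback runs, a callback may legally re-queue its
 * own work item. demo_more_to_do() is hypothetical.
 */
#if 0
static void demo_requeue_func(struct irq_work *work)
{
        if (demo_more_to_do())
                irq_work_queue(work);   /* claims 2 -> 3, lists it again */
}
#endif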

/*
 * Synchronize against the irq_work @work; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        WARN_ON_ONCE(irqs_disabled());

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
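
/*
 * Editorial usage sketch (not part of the original file): wait for the
 * callback to finish before freeing its backing storage on teardown.
 * demo_dev and its fields are hypothetical.
 */
#if 0
static void demo_teardown(struct demo_dev *dev)
{
        irq_work_sync(&dev->work);      /* no CPU is still inside dev->work.func() */
        kfree(dev);
}
#endif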

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}
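
/*
 * Editorial illustration (not part of the original file): with
 * IRQ_WORK_CLAIMED == (IRQ_WORK_PENDING | IRQ_WORK_BUSY), a busy entry
 * can still be claimed while an observed PENDING bit cannot:
 *
 *      flags  = BUSY & ~PENDING        (optimistic starting value)
 *      nflags = BUSY | CLAIMED         == PENDING | BUSY
 *      cmpxchg(BUSY -> PENDING|BUSY) succeeds: the claim wins
 *
 * If cmpxchg() instead returns a value with PENDING set, someone else
 * already holds the enqueue and the claim bails out with false.
 */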

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

#ifdef CONFIG_SMP

        /* Arch remote IPI send/receive backends aren't NMI safe */
        WARN_ON_ONCE(in_nmi());

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                arch_send_call_function_single_ipi(cpu);

#else /* #ifdef CONFIG_SMP */
        irq_work_queue(work);
#endif /* #else #ifdef CONFIG_SMP */

        return true;
}
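
/*
 * Editorial usage sketch (not part of the original file): push a
 * callback to a specific CPU, e.g. the one that owns a per-cpu
 * resource. demo_work and demo_cpu are hypothetical.
 */
#if 0
        if (!irq_work_queue_on(&demo_work, demo_cpu))
                pr_debug("demo: work already pending on some CPU\n");
#endif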

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }

        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
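
/*
 * Editorial usage sketch (not part of the original file): a lazy work
 * item runs from the next timer tick rather than forcing an immediate
 * self-interrupt; printk's klogd wakeup uses this pattern. The names
 * below are hypothetical.
 */
#if 0
static void demo_lazy_func(struct irq_work *work)
{
        wake_up_interruptible(&demo_waitq);     /* hypothetical wait queue */
}

static struct irq_work demo_lazy_work = {
        .flags  = IRQ_WORK_LAZY,
        .func   = demo_lazy_func,
};
#endif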

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        /*
         * The tick is only needed for lazy work, or for raised work on
         * architectures without a dedicated irq_work interrupt.
         */
        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}
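
/*
 * Editorial sketch (not part of the original file): the nohz code keeps
 * the tick alive while this CPU still has irq_work that only the tick
 * would run, loosely modeled on kernel/time/tick-sched.c. The wrapper
 * name is hypothetical.
 */
#if 0
static bool demo_can_stop_tick(void)
{
        if (irq_work_needs_cpu())
                return false;   /* keep ticking; pending work relies on it */
        return true;
}
#endif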

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Called from the timer tick: drain the raised list on architectures
 * without a dedicated irq_work interrupt, and always drain the lazy list.
 */
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);