/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
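
/*
 * Illustration only, not part of this file: an architecture with a self-IPI
 * normally overrides the weak stub above so that queued work runs right away
 * instead of waiting for the next timer tick. A rough sketch along the lines
 * of x86's dedicated IRQ_WORK_VECTOR; the exact APIC calls are per-arch and
 * may differ:
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		if (!arch_irq_work_has_interrupt())
 *			return;
 *		apic->send_IPI_self(IRQ_WORK_VECTOR);
 *		apic_wait_icr_idle();
 *	}
 */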

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif
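
/*
 * Minimal usage sketch for irq_work_queue_on() (hypothetical caller, not part
 * of this file, SMP only): push a callback into another CPU's hardirq
 * context. The names "my_remote_func" and "my_remote_work" are made up for
 * illustration, and "cpu" stands for any valid target CPU number.
 *
 *	static void my_remote_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work on CPU %d\n", smp_processor_id());
 *	}
 *
 *	static struct irq_work my_remote_work = { .func = my_remote_func };
 *
 *	if (!irq_work_queue_on(&my_remote_work, cpu))
 *		pr_debug("already pending somewhere, nothing to do\n");
 */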

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
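
/*
 * Minimal usage sketch for irq_work_queue() (hypothetical caller, not part of
 * this file): defer a callback from NMI or hardirq context to the local CPU's
 * irq_work interrupt, or to the next timer tick on architectures without one.
 * "my_func" and "my_work" are made-up names; init_irq_work() comes from
 * <linux/irq_work.h>.
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("deferred out of NMI/hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	...
 *	irq_work_queue(&my_work);
 *
 * The queueing call is NMI-safe. Non-urgent items can set IRQ_WORK_LAZY in a
 * static initializer (".flags = IRQ_WORK_LAZY"); they are then raised
 * immediately only when the tick is stopped and otherwise wait for the next
 * tick.
 */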

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	/*
	 * If the architecture has a dedicated irq_work interrupt, raised
	 * work is handled from that interrupt and the tick only needs to
	 * stay alive for lazy work. Otherwise the tick must also run the
	 * raised list.
	 */
	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Called from the timer tick: run the raised list here only when the
 * architecture has no dedicated irq_work interrupt to do it, and always
 * run the lazy list.
 */
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
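
/*
 * Typical teardown sketch (hypothetical caller, not part of this file): wait
 * for a previously queued item to finish before freeing anything its callback
 * touches. "my_work" is the made-up item from the sketch above.
 *
 *	irq_work_sync(&my_work);
 *
 * After this returns, the callback is no longer executing; as long as nothing
 * requeues the item, the data it uses can be torn down safely.
 */
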
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry; returns true on success, or false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);