// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
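/*
 * Minimal usage sketch (illustrative only; the callback and work item
 * names below are made up): a user embeds a struct irq_work, initialises
 * it with init_irq_work() from <linux/irq_work.h>, and queues it from
 * NMI or hardirq context. The callback then runs later in hardirq
 * context, either from the arch self-interrupt or from the timer tick:
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_irq_work_func);
 *	irq_work_queue(&my_work);
 */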

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
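	/*
	 * Note: IRQ_WORK_CLAIMED is IRQ_WORK_PENDING | IRQ_WORK_BUSY
	 * (see <linux/irq_work.h>), so a successful cmpxchg() marks the
	 * work both pending and busy in a single step.
	 */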
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

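	/*
	 * Pending raised work needs the tick only when the architecture
	 * lacks a dedicated irq_work interrupt; otherwise only pending
	 * lazy work keeps the tick alive.
	 */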
	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
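	/*
	 * The whole list was detached atomically above; anything queued
	 * from here on lands on a fresh, empty list and will raise its
	 * own interrupt.
	 */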
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

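	/*
	 * Without a dedicated irq_work interrupt the raised list would
	 * never be drained, so process it from the timer tick instead.
	 */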
	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensures the @work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
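
/*
 * What follows appears to be the original, pre-llist version of this
 * file: the per-CPU queue is a singly linked list threaded through
 * irq_work::next, with the PENDING and BUSY flags packed into the two
 * low bits of that pointer.
 */
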
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep PENDING and BUSY
 * flags.
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
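/*
 * Note: packing flags into the low bits relies on struct irq_work being
 * at least 4-byte aligned, so the two low bits of a real next pointer
 * are always zero.
 */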

static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
	return (unsigned long)entry->next & flags;
}

static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
	unsigned long next = (unsigned long)entry->next;
	next &= ~IRQ_WORK_FLAGS;
	return (struct irq_work *)next;
}

static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
	unsigned long next = (unsigned long)entry;
	next |= flags;
	return (struct irq_work *)next;
}

static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *entry)
{
	struct irq_work *next, *nflags;

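	/*
	 * Setting both PENDING and BUSY claims the entry; if PENDING is
	 * already set, someone else owns it and the claim fails.
	 */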
	do {
		next = entry->next;
		if ((unsigned long)next & IRQ_WORK_PENDING)
			return false;
		nflags = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(&entry->next, next, nflags) != next);

	return true;
}


void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work *next;

	preempt_disable();

	do {
		next = __this_cpu_read(irq_work_list);
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry, returns true on success, failure when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
	if (!irq_work_claim(entry)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(entry);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *list;

	if (this_cpu_read(irq_work_list) == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = this_cpu_xchg(irq_work_list, NULL);
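	/*
	 * The whole list was taken atomically above; entries queued from
	 * here on start a fresh list and raise their own interrupt.
	 */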

	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit, after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&entry->next,
			      next_flags(NULL, IRQ_WORK_BUSY),
			      NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *entry)
{
	WARN_ON_ONCE(irqs_disabled());

	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);