// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>

#ifndef CONFIG_ARC_HAS_LLSC
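/*
 * Without LLOCK/SCOND instructions, atomic R-M-W ops are serialized on this
 * global spinlock (exported so GPL modules using atomics can link against it)
 */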
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif

struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}
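
/*
 * A hypothetical DeviceTree snippet this would parse (the property value is
 * illustrative; any cpulist string accepted by cpulist_parse() works):
 *
 *	/ {
 *		possible-cpus = "0-1,3";
 *	};
 */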

/*
 * Read from DeviceTree and setup cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from cpu possible mask!");

	init_cpu_possible(&cpumask);
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is say common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * If the platform didn't set the present map already, do it now.
	 * The boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

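/*
 * ARCv2 secondaries may come up with their caches not yet enabled while the
 * master runs cached, so the handshake flag is accessed uncached on both
 * sides to keep them coherent
 */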
#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by a secondary
 * Called from asm stub in head.S
 * "current"/R25 already set up by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

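	/*
	 * Idle runs on init_mm: bump both the user count (mmget) and the
	 * structural count (mmgrab) so init_mm can't be torn down while this
	 * cpu holds it as active_mm
	 */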
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, the Secondary Processor is "HALT"ed:
 * - either it booted, but was halted in head.S
 * - or it was configured to halt-on-reset
 * So we need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

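	/*
	 * A platform specific kick (e.g. ARC MCIP) is handed the secondary's
	 * entry point; the default kick merely "ungates" a run-on-reset core
	 * already spinning in the boot stub, hence no @pc is needed
	 */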
	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out what msg was sent. For those which don't (ARC has one
 * dedicated IPI IRQ), the msg-type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write new msg bit (in case others are writing too),
	 * and read back old value
	 */
	do {
		new = old = *ipi_data_ptr;
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid it if
	 * possible: only do so if there's no pending msg from other
	 * concurrent sender(s). Otherwise the receiver will see this msg as
	 * well when it takes the IPI corresponding to that msg. This is true
	 * even if it is already in the IPI handler, because !@old means it
	 * has not yet dequeued the msg(s), so the @new msg can be a
	 * free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void arch_smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
static irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

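	/*
	 * do/while assumes @pending has at least one msg bit set: the IPI is
	 * only raised after a sender posted a msg (__ffs(0) is undefined)
	 */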
	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

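	/* type 0 == IRQ_TYPE_NONE: keep whatever trigger the intc set up */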
	enable_percpu_irq(virq, 0);

	return 0;
}
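
/*
 * A minimal usage sketch (hypothetical, not part of this file): a platform's
 * per-cpu init hook wiring its IPI line, assuming PLAT_IPI_HWIRQ is the hw
 * irq number of that line in the root interrupt domain:
 *
 *	static void plat_init_per_cpu(int cpu)
 *	{
 *		smp_ipi_irq_setup(cpu, PLAT_IPI_HWIRQ);
 *	}
 */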