// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif
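
/*
 * Without LLOCK/SCOND, the atomic/bitops read-modify-write primitives are
 * assumed to fall back to serializing on this global spinlock (via the
 * lock helpers in the !CONFIG_ARC_HAS_LLSC paths), hence its export here.
 */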

struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}

/*
 * Read from DeviceTree and set up the cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree, pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from the cpu possible mask!");

	init_cpu_possible(&cpumask);
}
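
/*
 * Hypothetical DeviceTree fragment for the above, using the cpulist
 * syntax accepted by cpulist_parse() (the values are made up):
 *
 *	/ {
 *		possible-cpus = "0-1,3";	// cpus 0,1,3 possible; 2 absent
 *	};
 */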

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is, say, common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * If the platform didn't set up the present map already, do it now.
	 * The boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif
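
/*
 * Note on the accessors above: on ARCv2 the secondary presumably comes up
 * with its caches not yet enabled, so the wake_flag handshake must bypass
 * the cache hierarchy via the uncached 32-bit accessors; on ARCompact a
 * plain volatile access is assumed to suffice.
 */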

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}
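
/*
 * Handshake recap: the master's __cpu_up() (via arc_default_smp_cpu_kick())
 * writes wake_flag = cpu; the secondary, spinning above since reset, sees
 * its own id, clears the flag so the next core can be kicked, and carries
 * on booting.
 */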

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init( ) -> smp_init( ) - for each CPU
 *
 * At this point, the secondary processor is "HALT"ed:
 * - it booted, but was halted in head.S
 * - it was configured to halt-on-reset
 * So we need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out what msg was sent. For those which don't (ARC has a
 * dedicated IPI IRQ), the msg-type needs to be conveyed via per-cpu data.
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write the new msg bit (in case others are writing too),
	 * and read back the old value
	 */
	do {
		new = old = *ipi_data_ptr;
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid if possible:
	 * Only do so if there's no pending msg from other concurrent sender(s).
	 * Otherwise, the receiver will see this msg as well when it takes the
	 * IPI corresponding to that msg. This is true even if it is already in
	 * the IPI handler, because !@old means it has not yet dequeued the
	 * msg(s), so the @new msg can be a free-loader.
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}
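
/*
 * Worked example of the encoding: a pending RESCHEDULE sets bit 1, a
 * concurrent CALL_FUNC sets bit 2, so ipi_data becomes 0b110 (6). The
 * second sender sees old != 0 and elides the kick, piggybacking on the
 * IPI already in flight.
 */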

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void arch_smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;
	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
static irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}
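
/*
 * Continuing the example from ipi_send_msg_one(): with ipi_data == 0b110,
 * the xchg() above returns 6, and the loop dispatches msg 1 (RESCHEDULE)
 * then msg 2 (CALL_FUNC) in a single ISR invocation.
 */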

/*
 * API called by platform code to hook up the arch-common ISR to their IPI IRQ
 *
 * Note: If the IPI is provided by the platform (vs. say ARC MCIP), their intc
 * setup/map function needs to call irq_set_percpu_devid() for the IPI IRQ,
 * otherwise request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
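
/*
 * Hypothetical usage sketch (all names below are made up): a platform's
 * irqdomain map hook marks the IPI hwirq as per-cpu before any cpu hooks
 * it up:
 *
 *	static int plat_intc_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hwirq)
 *	{
 *		if (hwirq == PLAT_IPI_HWIRQ)
 *			irq_set_percpu_devid(virq);
 *		irq_set_chip_and_handler(virq, &plat_irq_chip,
 *					 handle_percpu_irq);
 *		return 0;
 *	}
 *
 * with each cpu then calling smp_ipi_irq_setup(cpu, PLAT_IPI_HWIRQ).
 */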