// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

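/*
 * Map from logical CPU ids to hardware thread (hart) ids. Entries stay at
 * INVALID_HARTID until the corresponding hart is discovered during boot.
 */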
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

int riscv_hartid_to_cpuid(unsigned long hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%lu]\n", hartid);
	return -ENOENT;
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);

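/*
 * Crash-stop handler: save this CPU's registers for the crash dump, tell the
 * panicking CPU we are done by decrementing waiting_for_crash_ipi, then park
 * the hart (via the cpu_ops stop method when CPU hotplug is available,
 * otherwise in a wait-for-interrupt loop).
 */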
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_has_hotplug(cpu))
		cpu_ops[cpu]->cpu_stop();
#endif

	for (;;)
		wait_for_interrupt();
}
#else
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	unreachable();
}
#endif

static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

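/*
 * IPI backends (for example the SBI layer or an IPI-capable interrupt
 * controller driver) register themselves here. Only the hooks used in this
 * file are assumed: ->ipi_inject() raises an IPI on every CPU in a mask and
 * ->ipi_clear() acknowledges the local IPI. A minimal sketch, with
 * hypothetical names:
 *
 *	static const struct riscv_ipi_ops my_ipi_ops = {
 *		.ipi_inject	= my_ipi_inject,
 *		.ipi_clear	= my_ipi_clear,
 *	};
 *	riscv_set_ipi_ops(&my_ipi_ops);
 */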
void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);

void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);

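/*
 * Publish the requested IPI type as a bit in each target CPU's pending word,
 * then ask the registered backend to raise the interrupt; handle_IPI() on the
 * target consumes the bits. The barriers make the bit updates visible before
 * the IPI is injected.
 */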
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif

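/*
 * IPI entry point: clear the local IPI, then repeatedly drain this CPU's
 * pending word with xchg() and dispatch each requested operation, so requests
 * that arrive while earlier ones are being handled are not lost.
 */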
void handle_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[cpu].bits;
	unsigned long *stats = ipi_data[cpu].stats;

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_CPU_CRASH_STOP)) {
			ipi_cpu_crash_stop(cpu, get_irq_regs());
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (ops & (1 << IPI_TIMER)) {
			stats[IPI_TIMER]++;
			tick_receive_broadcast();
		}
#endif
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};

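/* Print the per-CPU IPI counts (one line per IPI type) for /proc/interrupts. */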
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif

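/*
 * Ask every other online CPU to stop (IPI_CPU_STOP marks it offline and parks
 * it in WFI), then wait up to one second for the online mask to shrink to
 * just this CPU.
 */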
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

#ifdef CONFIG_KEXEC_CORE
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0)
		return;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/* A collection of single bit ipi messages. */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_MAX
};

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

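/*
 * Software-interrupt handler: acknowledge the supervisor software interrupt,
 * then drain and dispatch this hart's pending IPI bits.
 */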
irqreturn_t handle_ipi(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;

	/* Clear pending IPI */
	csr_clear(sip, SIE_SSIE);

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}

	return IRQ_HANDLED;
}

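/*
 * Set the request bit for each target hart and use the SBI to trigger a
 * software interrupt there; handle_ipi() on the target consumes the bits.
 */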
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	sbi_send_ipi(cpumask_bits(to_whom));
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1)
		wait_for_interrupt();
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm != current->active_mm || !local)
		sbi_remote_fence_i(others.bits);
	else {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}