/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

asmlinkage __init void secondary_start_kernel(void);

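/*
 * Function used to raise an IPI to a set of CPUs.  It is registered by
 * the platform's interrupt controller driver via set_smp_cross_call()
 * during boot, e.g. (the OpenRISC OMPIC driver does this; other
 * platforms may differ):
 *
 *	set_smp_cross_call(ompic_raise_softirq);
 */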
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

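/*
 * Release one secondary CPU.  The secondary parks in the low-level
 * startup code, which is expected to poll secondary_release (see
 * head.S) until its own CPU number is written here; the IPI_WAKEUP
 * cross call then wakes the CPU in case it is sleeping rather than
 * busy-waiting.
 */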
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * Now that the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

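/*
 * Enumerate the possible CPUs from the device tree: every /cpus node
 * whose hardware ID is below NR_CPUS is marked possible.
 */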
void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		cpu_id = of_get_cpu_hwid(cpu, 0);
		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

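/*
 * Bring one secondary CPU online: publish its idle thread, point its
 * page-global directory at the kernel's, release it via
 * boot_secondary(), then wait (with a one second timeout) for it to
 * signal cpu_running from secondary_start_kernel().
 */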
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

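/*
 * C entry point for a secondary CPU, reached from the low-level boot
 * code once the MMU and stack are set up.  Runs with interrupts off
 * until the CPU has been marked online.
 */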
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

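/*
 * IPI demultiplexer, called by the interrupt controller driver when a
 * cross call arrives.  IPI_WAKEUP needs no handling here: taking the
 * interrupt is itself enough to bring a sleeping CPU back out of its
 * doze.
 */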
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void arch_smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

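/*
 * Park the calling CPU: mark it offline and, if a power management
 * unit is present (SPR_UPR_PMP), enter doze mode; failing that, spin
 * forever with interrupts disabled.
 */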
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

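/*
 * Flush an mm's TLB entries on every CPU that has used it.  If the
 * calling CPU is the only one in the mask, flush locally and avoid
 * the cost of a cross call.
 */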
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

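/*
 * An IPI handler takes a single void * argument, so a range flush has
 * to bundle its start and end addresses into one structure.
 */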
struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

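/*
 * As with smp_flush_tlb_mm(), flush locally when the calling CPU is
 * the only one in the mask.  Ranges of at most one page take the
 * cheaper single-page flush path.
 */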
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

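/*
 * A NULL vma is allowed (flush_tlb_kernel_range() is typically mapped
 * onto this path) and broadcasts the flush to every online CPU.
 */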
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;

	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);