/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

asmlinkage __init void secondary_start_kernel(void);

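/*
 * Function used to raise an IPI on another CPU or set of CPUs.  It is
 * registered at boot by the platform's interrupt controller driver via
 * set_smp_cross_call() (in mainline OpenRISC, the OMPIC irqchip driver).
 */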
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

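/*
 * Handshake state consumed by the secondary's early startup code: the
 * secondary waits for secondary_release to match its core id before it
 * proceeds, then picks up its initial stack via secondary_thread_info.
 */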
unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

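/* Message types carried by the cross-CPU (IPI) interrupt. */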
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

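/*
 * Release one secondary CPU: publish its id in secondary_release, then
 * kick it with IPI_WAKEUP so it can leave its early wait loop.
 */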
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

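/*
 * Walk the devicetree CPU nodes and mark each hardware id below
 * NR_CPUS as a possible CPU.
 */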
void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		cpu_id = of_get_cpu_hwid(cpu, 0);
		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

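/*
 * Boot one secondary CPU, called from the generic CPU hotplug code.
 * Fails if no cross-call function has been registered or if the
 * secondary does not check in within one second.
 */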
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

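/*
 * C entry point for a secondary CPU, reached from the early assembly
 * startup path.  Runs on the idle thread's stack with the shared
 * init_mm as its address space.
 */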
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

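/*
 * Demultiplex an incoming IPI.  Called by the interrupt controller
 * driver with the message id it decoded for this CPU.
 */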
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void arch_smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

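/*
 * Park the calling CPU: mark it offline, mask interrupts and drop into
 * doze mode if the core has power management, otherwise spin.  Run on
 * all other CPUs by smp_send_stop() below.
 */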
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

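/*
 * Flush an mm's TLB entries on every CPU in cmask.  When the calling
 * CPU is the only one in the mask, flush locally and skip the IPI
 * round trip.
 */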
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

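/*
 * Range flush with the same local-only fast path as smp_flush_tlb_mm()
 * above; ranges no larger than one page are demoted to a single page
 * flush.  The IPI helpers pass a NULL vma since the local flush
 * routines here do not need it.
 */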
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;

	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

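/*
 * Invalidate a page from the instruction cache on all CPUs.  Icache
 * maintenance is local to each core (there is no hardware broadcast),
 * so the invalidate is distributed with an IPI.
 */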
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);