v4.17
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}
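
/*
 * The wakeup handshake, in brief: each secondary sits in the early
 * assembly boot path (presumably arch/openrisc/kernel/head.S - an
 * assumption, since the wait loop is not shown in this file) polling
 * secondary_release.  Writing the target CPU number here and then
 * raising IPI_WAKEUP releases exactly one waiting core, which falls
 * through into secondary_start_kernel() below.
 */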

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}
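
/*
 * A note on the doze above: SPR_UPR_PMP is the "power management
 * present" bit in the Unit Present Register, and SPR_PMR_DME is the
 * doze-mode-enable bit in the Power Management Register, so the CPU is
 * only put to sleep when the hardware actually implements power
 * management; otherwise it just spins with interrupts off.
 */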

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
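
/*
 * A minimal sketch (not part of this file) of how an interrupt
 * controller driver registers the cross-call hook.  On OpenRISC the
 * real registration lives in the OMPIC driver,
 * drivers/irqchip/irq-ompic.c; the register layout, ompic_base and
 * OMPIC_IPI_CTRL() below are illustrative assumptions only, hence the
 * #if 0 guard.
 */
#if 0	/* illustrative sketch */
static void ompic_raise_softirq(const struct cpumask *mask,
				unsigned int ipi)
{
	unsigned int dst_cpu;

	/* Write the IPI number into each destination CPU's mailbox
	 * register (hypothetical layout) so that CPU takes its per-cpu
	 * interrupt and ends up dispatching handle_IPI(). */
	for_each_cpu(dst_cpu, mask)
		writel(ipi, ompic_base + OMPIC_IPI_CTRL(dst_cpu));
}

static int __init ompic_of_init(struct device_node *node,
				struct device_node *parent)
{
	/* ... ioremap the registers, request the IPI interrupt ... */
	set_smp_cross_call(ompic_raise_softirq);
	return 0;
}
#endif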

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/*
 * FIXME: implement proper functionality instead of flush_tlb_all.
 * *But*, as things currently stand, the local_tlb_flush_* functions will
 * all boil down to local_tlb_flush_all anyway.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);
v6.9.4
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

asmlinkage __init void secondary_start_kernel(void);

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		cpu_id = of_get_cpu_hwid(cpu, 0);
		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}
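
/*
 * Unlike v4.17, which blindly marked all NR_CPUS possible,
 * smp_init_cpus() now walks the devicetree /cpus node, so a two-core
 * system would be described roughly like the fragment below
 * (illustrative, not taken from any particular board file):
 *
 *	cpus {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		cpu@0 { compatible = "opencores,or1200-rtlsvn481"; reg = <0>; };
 *		cpu@1 { compatible = "opencores,or1200-rtlsvn481"; reg = <1>; };
 *	};
 *
 * of_get_cpu_hwid() returns the reg value, which is used directly as
 * the logical CPU number here.
 */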

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void arch_smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
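
/*
 * Note the rename relative to v4.17: the scheduler IPI hook is now
 * arch_smp_send_reschedule(), with generic code providing the
 * smp_send_reschedule() wrapper around it.
 */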

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}
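
/*
 * cpumask_any_but(cmask, cpuid) returns nr_cpu_ids (or higher) when
 * cpuid is the only CPU set in cmask, so the common single-threaded
 * case avoids the cross-call machinery entirely and flushes only the
 * local TLB.  get_cpu()/put_cpu() disable and re-enable preemption, so
 * the "local only" decision cannot be invalidated by a migration in
 * between.
 */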

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}
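
/*
 * Passing a pointer to the on-stack flush_tlb_data is safe here only
 * because on_each_cpu_mask() is called with wait=1: every remote
 * handler has finished reading fd before smp_flush_tlb_range() returns
 * and the stack frame disappears.
 */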

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;
	smp_flush_tlb_range(cmask, start, end);
}
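
/*
 * flush_tlb_range() tolerates a NULL vma: with no mm to scope the flush
 * to, it falls back to broadcasting across every online CPU via
 * cpu_online_mask.  The v4.17 FIXME is thus resolved; only the flushes
 * above still go to all CPUs unconditionally.
 */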

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);