v6.8
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

asmlinkage __init void secondary_start_kernel(void);

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

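/*
 * Release one secondary CPU: publish its logical id in
 * secondary_release and kick it with an IPI_WAKEUP cross-call.
 * The secondary's early boot code is expected to be polling
 * secondary_release; boot_lock serialises this handshake.
 */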
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        /*
         * set synchronisation state between this boot processor
         * and the secondary one
         */
        spin_lock(&boot_lock);

        secondary_release = cpu;
        smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

        /*
         * now the secondary core is starting up let it run its
         * calibrations, then wait for it to finish
         */
        spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

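/*
 * Enumerate the CPU nodes in the device tree and mark every CPU
 * with a usable hardware id as possible.
 */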
void __init smp_init_cpus(void)
{
        struct device_node *cpu;
        u32 cpu_id;

        for_each_of_cpu_node(cpu) {
                cpu_id = of_get_cpu_hwid(cpu, 0);
                if (cpu_id < NR_CPUS)
                        set_cpu_possible(cpu_id, true);
        }
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        /*
         * Initialise the present map, which describes the set of CPUs
         * actually populated at the present time.
         */
        for_each_possible_cpu(cpu) {
                if (cpu < max_cpus)
                        set_cpu_present(cpu, true);
        }
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

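/*
 * Bring a secondary CPU up: hand it the idle thread and the init_mm
 * page tables, release it via boot_secondary(), then wait up to one
 * second for it to signal cpu_running before synchronising timer
 * counters with it.
 */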
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        if (smp_cross_call == NULL) {
                pr_warn("CPU%u: failed to start, IPI controller missing\n",
                        cpu);
                return -EIO;
        }

        secondary_thread_info = task_thread_info(idle);
        current_pgd[cpu] = init_mm.pgd;

        boot_secondary(cpu, idle);
        if (!wait_for_completion_timeout(&cpu_running,
                                        msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }
        synchronise_count_master(cpu);

        return 0;
}

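/*
 * C entry point for a freshly started secondary CPU: adopt init_mm,
 * set up per-CPU state and the local clockevent, signal the boot CPU
 * via cpu_running, then enter the idle loop.
 */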
asmlinkage __init void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();
        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        pr_info("CPU%u: Booted secondary processor\n", cpu);

        setup_cpuinfo();
        openrisc_clockevent_init();

        notify_cpu_starting(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        complete(&cpu_running);

        synchronise_count_slave(cpu);
        set_cpu_online(cpu, true);

        local_irq_enable();
        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

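/*
 * Demultiplex an incoming IPI; called by the platform's IPI
 * controller driver.  IPI_WAKEUP carries no work of its own - the
 * interrupt is only there to rouse a waiting CPU - so it is
 * deliberately a no-op here.
 */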
void handle_IPI(unsigned int ipi_msg)
{
        unsigned int cpu = smp_processor_id();

        switch (ipi_msg) {
        case IPI_WAKEUP:
                break;

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;

        default:
                WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
                break;
        }
}

void arch_smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

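/*
 * Park this CPU: mark it offline, mask local interrupts and, if the
 * power management unit is present, enter doze mode; otherwise spin.
 */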
static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        /* CPU Doze */
        if (mfspr(SPR_UPR) & SPR_UPR_PMP)
                mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
        /* If that didn't work, infinite loop */
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
        struct mm_struct *mm = (struct mm_struct *)info;

        local_flush_tlb_mm(mm);
}

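/*
 * Flush an mm's TLB entries on every CPU in cmask.  When the local
 * CPU is the only one in the mask, flush directly and skip the
 * cross-call.
 */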
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
        unsigned int cpuid;

        if (cpumask_empty(cmask))
                return;

        cpuid = get_cpu();

        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
                /* local cpu is the only cpu present in cpumask */
                local_flush_tlb_mm(mm);
        } else {
                on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
        }
        put_cpu();
}

struct flush_tlb_data {
        unsigned long addr1;
        unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

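/*
 * Flush a virtual address range on the CPUs in cmask, using a
 * single-page flush when the range fits within one page.  As above,
 * the cross-call is skipped when only the local CPU is affected.
 */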
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
                                unsigned long end)
{
        unsigned int cpuid;

        if (cpumask_empty(cmask))
                return;

        cpuid = get_cpu();

        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
                /* local cpu is the only cpu present in cpumask */
                if ((end - start) <= PAGE_SIZE)
                        local_flush_tlb_page(NULL, start);
                else
                        local_flush_tlb_range(NULL, start, end);
        } else {
                struct flush_tlb_data fd;

                fd.addr1 = start;
                fd.addr2 = end;

                if ((end - start) <= PAGE_SIZE)
                        on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
                else
                        on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
        }
        put_cpu();
}

void flush_tlb_all(void)
{
        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

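/*
 * A NULL vma denotes a kernel range with no owning mm, in which case
 * the flush is broadcast to every online CPU.
 */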
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
                                          : cpu_online_mask;
        smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
        struct page *page = arg;

        local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
        on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);
v5.9
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        /*
         * set synchronisation state between this boot processor
         * and the secondary one
         */
        spin_lock(&boot_lock);

        secondary_release = cpu;
        smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

        /*
         * now the secondary core is starting up let it run its
         * calibrations, then wait for it to finish
         */
        spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

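/*
 * No device tree enumeration at this point: every CPU index up to
 * NR_CPUS is simply marked as possible.
 */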
void __init smp_init_cpus(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int i;

        /*
         * Initialise the present map, which describes the set of CPUs
         * actually populated at the present time.
         */
        for (i = 0; i < max_cpus; i++)
                set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        if (smp_cross_call == NULL) {
                pr_warn("CPU%u: failed to start, IPI controller missing\n",
                        cpu);
                return -EIO;
        }

        secondary_thread_info = task_thread_info(idle);
        current_pgd[cpu] = init_mm.pgd;

        boot_secondary(cpu, idle);
        if (!wait_for_completion_timeout(&cpu_running,
                                        msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }
        synchronise_count_master(cpu);

        return 0;
}

asmlinkage __init void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();
        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        pr_info("CPU%u: Booted secondary processor\n", cpu);

        setup_cpuinfo();
        openrisc_clockevent_init();

        notify_cpu_starting(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        complete(&cpu_running);

        synchronise_count_slave(cpu);
        set_cpu_online(cpu, true);

        local_irq_enable();

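        /*
         * The idle entry path in this kernel expects to be called
         * with preemption already disabled.
         */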
        preempt_disable();
        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void handle_IPI(unsigned int ipi_msg)
{
        unsigned int cpu = smp_processor_id();

        switch (ipi_msg) {
        case IPI_WAKEUP:
                break;

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;

        default:
                WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        /* CPU Doze */
        if (mfspr(SPR_UPR) & SPR_UPR_PMP)
                mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
        /* If that didn't work, infinite loop */
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
        struct mm_struct *mm = (struct mm_struct *)info;

        local_flush_tlb_mm(mm);
}

static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
        unsigned int cpuid;

        if (cpumask_empty(cmask))
                return;

        cpuid = get_cpu();

        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
                /* local cpu is the only cpu present in cpumask */
                local_flush_tlb_mm(mm);
        } else {
                on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
        }
        put_cpu();
}

struct flush_tlb_data {
        unsigned long addr1;
        unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
                                unsigned long end)
{
        unsigned int cpuid;

        if (cpumask_empty(cmask))
                return;

        cpuid = get_cpu();

        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
                /* local cpu is the only cpu present in cpumask */
                if ((end - start) <= PAGE_SIZE)
                        local_flush_tlb_page(NULL, start);
                else
                        local_flush_tlb_range(NULL, start, end);
        } else {
                struct flush_tlb_data fd;

                fd.addr1 = start;
                fd.addr2 = end;

                if ((end - start) <= PAGE_SIZE)
                        on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
                else
                        on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
        }
        put_cpu();
}

void flush_tlb_all(void)
{
        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
        struct page *page = arg;

        local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
        on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);