v4.6
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */
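
/*
 * A minimal sketch of a test-and-set spinlock built on 'ldstub',
 * assuming the lock byte is 0 when free. Illustrative only: the real
 * sparc32 spinlocks live in asm/spinlock_32.h, and the example_*
 * names below are made up.
 */
static inline void example_ldstub_lock(unsigned char *lock)
{
	unsigned char val;

	do {
		/* Atomically fetch the lock byte and store 0xff there. */
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=r" (val)
				     : "r" (lock)
				     : "memory");
		/* A non-zero result means another CPU already held it. */
	} while (val);
}

static inline void example_ldstub_unlock(unsigned char *lock)
{
	/* A plain zero byte store releases the lock. */
	__asm__ __volatile__("stb	%%g0, [%0]"
			     :
			     : "r" (lock)
			     : "memory");
}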

void smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

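	/*
	 * udelay_val is loops_per_jiffy, so a CPU's BogoMIPS is
	 * loops_per_jiffy * HZ / 500000; the two divisions below print
	 * the integer part and two fractional digits in integer math.
	 */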
	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	sparc32_ipi_ops->resched(cpu);
}
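
/*
 * Illustrative sketch (hypothetical, not the actual sun4m/sun4d/leon
 * code): each platform installs its own sparc32_ipi_ops at boot, and
 * the generic helpers here just dispatch through it. The example_*
 * names and the doorbell register are made up, hence the #if 0 guard.
 */
#if 0
static void example_ipi_resched(int cpu)
{
	/* Poke a hypothetical per-CPU doorbell that raises the
	 * reschedule soft interrupt on the target CPU. */
	example_doorbell_write(cpu, EXAMPLE_IPI_RESCHED);
}

static const struct sparc32_ipi_ops example_ipi_ops = {
	.resched  = example_ipi_resched,
	.single   = example_ipi_single,
	.mask_one = example_ipi_mask_one,
};

static void __init example_platform_init_smp(void)
{
	sparc32_ipi_ops = &example_ipi_ops;
}
#endif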

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}

void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
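	/* Changing the profiling rate is not supported on sparc32. */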
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

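	/*
	 * On success, let the new CPU proceed past its callin gate and
	 * spin until it has marked itself online.
	 */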
	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

static void arch_cpu_pre_starting(void *arg)
{
	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_starting(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_starting(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_starting(arg);
		break;
	default:
		BUG();
	}
}

static void arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		BUG();
	}
}

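/*
 * Entry point for a freshly booted secondary CPU: flush caches/TLB and
 * do model-specific setup (pre_starting), run the CPU_STARTING
 * notifiers, calibrate delays and finish model-specific bring-up
 * (pre_online), mark the CPU online, enable interrupts, and finally
 * enter the idle loop. It must never return.
 */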
static void sparc_start_secondary(void *arg)
{
	unsigned int cpu;

	/*
	 * SMP booting is extremely fragile in some architectures. So run
	 * the cpu initialization code first before anything else.
	 */
	arch_cpu_pre_starting(arg);

	preempt_disable();
	cpu = smp_processor_id();

	/* Invoke the CPU_STARTING notifier callbacks */
	notify_cpu_starting(cpu);

	arch_cpu_pre_online(arg);

	/* Set the CPU in the cpu_online_mask */
	set_cpu_online(cpu, true);

	/* Enable local interrupts now */
	local_irq_enable();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* We should never reach here! */
	BUG();
}

void smp_callin(void)
{
	sparc_start_secondary(NULL);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}
v3.15
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */

void smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	sparc32_ipi_ops->resched(cpu);
}

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}

void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	extern int smp4m_boot_one_cpu(int, struct task_struct *);
	extern int smp4d_boot_one_cpu(int, struct task_struct *);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

void arch_cpu_pre_starting(void *arg)
{
	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_starting(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_starting(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_starting(arg);
		break;
	default:
		BUG();
	}
}

void arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		BUG();
	}
}

void sparc_start_secondary(void *arg)
{
	unsigned int cpu;

	/*
	 * SMP booting is extremely fragile in some architectures. So run
	 * the cpu initialization code first before anything else.
	 */
	arch_cpu_pre_starting(arg);

	preempt_disable();
	cpu = smp_processor_id();

	/* Invoke the CPU_STARTING notifier callbacks */
	notify_cpu_starting(cpu);

	arch_cpu_pre_online(arg);

	/* Set the CPU in the cpu_online_mask */
	set_cpu_online(cpu, true);

	/* Enable local interrupts now */
	local_irq_enable();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);

	/* We should never reach here! */
	BUG();
}

void smp_callin(void)
{
	sparc_start_secondary(NULL);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}