/* SuperH (sh) architecture SMP definitions — <asm/smp.h> */
1#ifndef __ASM_SH_SMP_H
2#define __ASM_SH_SMP_H
3
4#include <linux/bitops.h>
5#include <linux/cpumask.h>
6#include <asm/smp-ops.h>
7
8#ifdef CONFIG_SMP
9
10#include <linux/spinlock.h>
11#include <linux/atomic.h>
12#include <asm/current.h>
13#include <asm/percpu.h>
14
15#define raw_smp_processor_id() (current_thread_info()->cpu)
16
17/* Map from cpu id to sequential logical cpu number. */
18extern int __cpu_number_map[NR_CPUS];
19#define cpu_number_map(cpu) __cpu_number_map[cpu]
20
21/* The reverse map from sequential logical cpu number to cpu id. */
22extern int __cpu_logical_map[NR_CPUS];
23#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
24
/*
 * Inter-processor interrupt (IPI) message types, presumably consumed by
 * smp_message_recv() below — verify against its callers.  The enumerator
 * values double as message numbers, so the order is ABI within this arch
 * and must not be changed.
 */
enum {
	SMP_MSG_FUNCTION,		/* cross-CPU function-call request */
	SMP_MSG_RESCHEDULE,		/* ask the target CPU to reschedule */
	SMP_MSG_FUNCTION_SINGLE,	/* single-target function-call request */
	SMP_MSG_TIMER,			/* timer broadcast tick */

	SMP_MSG_NR,	/* must be last — count of valid message types */
};
33
34DECLARE_PER_CPU(int, cpu_state);
35
36void smp_message_recv(unsigned int msg);
37void smp_timer_broadcast(const struct cpumask *mask);
38
39void local_timer_interrupt(void);
40void local_timer_setup(unsigned int cpu);
41void local_timer_stop(unsigned int cpu);
42
43void arch_send_call_function_single_ipi(int cpu);
44void arch_send_call_function_ipi_mask(const struct cpumask *mask);
45
46void native_play_dead(void);
47void native_cpu_die(unsigned int cpu);
48int native_cpu_disable(unsigned int cpu);
49
50#ifdef CONFIG_HOTPLUG_CPU
51void play_dead_common(void);
52extern int __cpu_disable(void);
53
54static inline void __cpu_die(unsigned int cpu)
55{
56 extern struct plat_smp_ops *mp_ops; /* private */
57
58 mp_ops->cpu_die(cpu);
59}
60#endif
61
62static inline int hard_smp_processor_id(void)
63{
64 extern struct plat_smp_ops *mp_ops; /* private */
65
66 if (!mp_ops)
67 return 0; /* boot CPU */
68
69 return mp_ops->smp_processor_id();
70}
71
72#else
73
74#define hard_smp_processor_id() (0)
75
76#endif /* CONFIG_SMP */
77
78#endif /* __ASM_SH_SMP_H */