/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H

#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/cpu_has_feature.h>

/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power of 2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had less threads as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */

#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;	/* log2(threads_per_core); topology is power-of-2 */
extern cpumask_t threads_core_mask;	/* mask of the threads of the first core */
#else
/* UP fallback: one core, one thread. */
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif

33/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
34 * hit by the argument
35 *
36 * @threads: a cpumask of online threads
37 *
38 * This function returns a cpumask which will have one online cpu's
39 * bit set for each core that has at least one thread set in the argument.
40 *
41 * This can typically be used for things like IPI for tlb invalidations
42 * since those need to be done only once per core/TLB
43 */
44static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
45{
46 cpumask_t tmp, res;
47 int i, cpu;
48
49 cpumask_clear(&res);
50 for (i = 0; i < NR_CPUS; i += threads_per_core) {
51 cpumask_shift_left(&tmp, &threads_core_mask, i);
52 if (cpumask_intersects(threads, &tmp)) {
53 cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
54 if (cpu < nr_cpu_ids)
55 cpumask_set_cpu(cpu, &res);
56 }
57 }
58 return res;
59}
60
61static inline int cpu_nr_cores(void)
62{
63 return nr_cpu_ids >> threads_shift;
64}
65
66static inline cpumask_t cpu_online_cores_map(void)
67{
68 return cpu_thread_mask_to_cores(cpu_online_mask);
69}
70
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
/* UP: core and thread numbering coincide. */
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif

79static inline int cpu_thread_in_core(int cpu)
80{
81 return cpu & (threads_per_core - 1);
82}
83
84static inline int cpu_thread_in_subcore(int cpu)
85{
86 return cpu & (threads_per_subcore - 1);
87}
88
89static inline int cpu_first_thread_sibling(int cpu)
90{
91 return cpu & ~(threads_per_core - 1);
92}
93
94static inline int cpu_last_thread_sibling(int cpu)
95{
96 return cpu | (threads_per_core - 1);
97}
98
99static inline u32 get_tensr(void)
100{
101#ifdef CONFIG_BOOKE
102 if (cpu_has_feature(CPU_FTR_SMT))
103 return mfspr(SPRN_TENSR);
104#endif
105 return 1;
106}
107
/* Start/stop a hardware thread on Book3E parts; implemented elsewhere. */
void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);

#endif /* __ASSEMBLY__ */

#define INVALID_THREAD_HWID	0x0fff

#endif /* _ASM_POWERPC_CPUTHREADS_H */

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H

#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/cpu_has_feature.h>

/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power of 2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had less threads as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */

#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;	/* log2(threads_per_core); topology is power-of-2 */
extern cpumask_t threads_core_mask;	/* mask of the threads of the first core */
#else
/* UP fallback: one core, one thread, no big cores. */
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define has_big_cores		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif

35static inline int cpu_nr_cores(void)
36{
37 return nr_cpu_ids >> threads_shift;
38}
39
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
/* UP: core and thread numbering coincide. */
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif

48static inline int cpu_thread_in_core(int cpu)
49{
50 return cpu & (threads_per_core - 1);
51}
52
53static inline int cpu_thread_in_subcore(int cpu)
54{
55 return cpu & (threads_per_subcore - 1);
56}
57
58static inline int cpu_first_thread_sibling(int cpu)
59{
60 return cpu & ~(threads_per_core - 1);
61}
62
63static inline int cpu_last_thread_sibling(int cpu)
64{
65 return cpu | (threads_per_core - 1);
66}
67
68/*
69 * tlb_thread_siblings are siblings which share a TLB. This is not
70 * architected, is not something a hypervisor could emulate and a future
71 * CPU may change behaviour even in compat mode, so this should only be
72 * used on PowerNV, and only with care.
73 */
74static inline int cpu_first_tlb_thread_sibling(int cpu)
75{
76 if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
77 return cpu & ~0x6; /* Big Core */
78 else
79 return cpu_first_thread_sibling(cpu);
80}
81
82static inline int cpu_last_tlb_thread_sibling(int cpu)
83{
84 if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
85 return cpu | 0x6; /* Big Core */
86 else
87 return cpu_last_thread_sibling(cpu);
88}
89
90static inline int cpu_tlb_thread_sibling_step(void)
91{
92 if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
93 return 2; /* Big Core */
94 else
95 return 1;
96}
97
98static inline u32 get_tensr(void)
99{
100#ifdef CONFIG_BOOKE
101 if (cpu_has_feature(CPU_FTR_SMT))
102 return mfspr(SPRN_TENSR);
103#endif
104 return 1;
105}
106
/* Start/stop a hardware thread on Book3E parts; implemented elsewhere. */
void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);

#endif /* __ASSEMBLY__ */

#define INVALID_THREAD_HWID	0x0fff

#endif /* _ASM_POWERPC_CPUTHREADS_H */
