// SPDX-License-Identifier: GPL-2.0
#include <linux/cpu.h>

#include <xen/xen.h>

#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/smp.h>

#include "cpu.h"

struct x86_topology_system x86_topo_system __ro_after_init;
EXPORT_SYMBOL_GPL(x86_topo_system);

unsigned int __amd_nodes_per_pkg __ro_after_init;
EXPORT_SYMBOL_GPL(__amd_nodes_per_pkg);

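/*
 * Store the APIC ID shift and CPU count for a topology domain and propagate
 * the values to all higher domains, so levels which are not enumerated by
 * CPUID inherit the information from the level below.
 */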
void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
		      unsigned int shift, unsigned int ncpus)
{
	topology_update_dom(tscan, dom, shift, ncpus);

	/* Propagate to the upper levels */
	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		tscan->dom_shifts[dom] = tscan->dom_shifts[dom - 1];
		tscan->dom_ncpus[dom] = tscan->dom_ncpus[dom - 1];
	}
}

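/*
 * Translate the vendor specific core type information (Intel hybrid core
 * type, AMD core type) into the generic topology CPU type.
 */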
enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		switch (c->topo.intel_type) {
		case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
		case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
		}
	}
	if (c->x86_vendor == X86_VENDOR_AMD) {
		switch (c->topo.amd_type) {
		case 0: return TOPO_CPU_TYPE_PERFORMANCE;
		case 1: return TOPO_CPU_TYPE_EFFICIENCY;
		}
	}

	return TOPO_CPU_TYPE_UNKNOWN;
}

const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
{
	switch (get_topology_cpu_type(c)) {
	case TOPO_CPU_TYPE_PERFORMANCE:
		return "performance";
	case TOPO_CPU_TYPE_EFFICIENCY:
		return "efficiency";
	default:
		return "unknown";
	}
}

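/*
 * Legacy enumeration of the number of cores per package from CPUID leaf
 * 0x4 subleaf 0 (EAX[31:26] holds the core count - 1). Returns 1 when the
 * leaf is not available or does not enumerate a cache.
 */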
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
	struct {
		u32	cache_type	:  5,
			unused		: 21,
			ncores		:  6;
	} eax;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_subleaf_reg(4, 0, CPUID_EAX, &eax);
	if (!eax.cache_type)
		return 1;

	return eax.ncores + 1;
}

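/*
 * Fallback topology evaluation derived from the legacy core count and the
 * logical processor count in CPUID leaf 0x1 EBX, converted into the shift
 * based format of leaf 0xb/0x1f.
 */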
static void parse_legacy(struct topo_scan *tscan)
{
	unsigned int cores, core_shift, smt_shift = 0;
	struct cpuinfo_x86 *c = tscan->c;

	cores = parse_num_cores_legacy(c);
	core_shift = get_count_order(cores);

	if (cpu_has(c, X86_FEATURE_HT)) {
		if (!WARN_ON_ONCE(tscan->ebx1_nproc_shift < core_shift))
			smt_shift = tscan->ebx1_nproc_shift - core_shift;
		/*
		 * The parser expects leaf 0xb/0x1f format, which means
		 * the number of logical processors at core level is
		 * counting threads.
		 */
		core_shift += smt_shift;
		cores <<= smt_shift;
	}

	topology_set_dom(tscan, TOPO_SMT_DOMAIN, smt_shift, 1U << smt_shift);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, core_shift, cores);
}

static bool fake_topology(struct topo_scan *tscan)
{
	/*
	 * Preset the CORE level shift for CPUID less systems and XEN_PV,
	 * which has useless CPUID information.
	 */
	topology_set_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, 0, 1);

	return tscan->c->cpuid_level < 1;
}

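/*
 * Set up the topology defaults, read the initial APIC ID from CPUID leaf
 * 0x1 and invoke the vendor specific parsers. @early denotes the first
 * invocation from early_identify_cpu(), where the APIC ID has to be taken
 * from CPUID because the APIC is not accessible yet.
 */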
static void parse_topology(struct topo_scan *tscan, bool early)
{
	const struct cpuinfo_topology topo_defaults = {
		.cu_id			= 0xff,
		.llc_id			= BAD_APICID,
		.l2c_id			= BAD_APICID,
		.cpu_type		= TOPO_CPU_TYPE_UNKNOWN,
	};
	struct cpuinfo_x86 *c = tscan->c;
	struct {
		u32	unused0		: 16,
			nproc		:  8,
			apicid		:  8;
	} ebx;

	c->topo = topo_defaults;

	if (fake_topology(tscan))
		return;

	/* Preset Initial APIC ID from CPUID leaf 1 */
	cpuid_leaf_reg(1, CPUID_EBX, &ebx);
	c->topo.initial_apicid = ebx.apicid;

	/*
	 * The initial invocation from early_identify_cpu() happens before
	 * the APIC is mapped or X2APIC enabled. For establishing the
	 * topology, that's not required. Use the initial APIC ID.
	 */
	if (early)
		c->topo.apicid = c->topo.initial_apicid;
	else
		c->topo.apicid = read_apic_id();

	/* The above is sufficient for UP */
	if (!IS_ENABLED(CONFIG_SMP))
		return;

	tscan->ebx1_nproc_shift = get_count_order(ebx.nproc);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (IS_ENABLED(CONFIG_CPU_SUP_AMD))
			cpu_parse_topology_amd(tscan);
		break;
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		parse_legacy(tscan);
		break;
	case X86_VENDOR_INTEL:
		if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
			parse_legacy(tscan);
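		/* CPUID leaf 0x1a enumerates the hybrid core type */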
		if (c->cpuid_level >= 0x1a)
			c->topo.cpu_type = cpuid_eax(0x1a);
		break;
	case X86_VENDOR_HYGON:
		if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
			cpu_parse_topology_amd(tscan);
		break;
	}
}

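/*
 * Convert the APIC ID into the package, die and core identifiers by
 * applying the domain shifts and masks established for the system.
 */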
static void topo_set_ids(struct topo_scan *tscan, bool early)
{
	struct cpuinfo_x86 *c = tscan->c;
	u32 apicid = c->topo.apicid;

	c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);
	c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);

	if (!early) {
		c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
		c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
	}

	/* Package relative core ID */
	c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
		x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN];

	c->topo.amd_node_id = tscan->amd_node_id;

	if (c->x86_vendor == X86_VENDOR_AMD)
		cpu_topology_fixup_amd(tscan);
}

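/*
 * Evaluate the topology of a CPU and sanity check it against the system
 * wide information established by cpu_init_topology(). Inconsistencies
 * are reported as firmware bugs.
 */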
void cpu_parse_topology(struct cpuinfo_x86 *c)
{
	unsigned int dom, cpu = smp_processor_id();
	struct topo_scan tscan = { .c = c, };

	parse_topology(&tscan, false);

	if (IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
		if (c->topo.initial_apicid != c->topo.apicid) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. CPUID: 0x%04x APIC: 0x%04x\n",
			       cpu, c->topo.initial_apicid, c->topo.apicid);
		}

		if (c->topo.apicid != cpuid_to_apicid[cpu]) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. Firmware: 0x%04x APIC: 0x%04x\n",
			       cpu, cpuid_to_apicid[cpu], c->topo.apicid);
		}
	}

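	/* The domain shifts must match the ones established by the boot CPU */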
	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++) {
		if (tscan.dom_shifts[dom] == x86_topo_system.dom_shifts[dom])
			continue;
		pr_err(FW_BUG "CPU%d: Topology domain %u shift %u != %u\n", cpu, dom,
		       tscan.dom_shifts[dom], x86_topo_system.dom_shifts[dom]);
	}

	topo_set_ids(&tscan, false);
}

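/*
 * Establish the system wide topology information (domain shifts and unit
 * sizes) from the boot CPU's evaluation. This is the reference against
 * which all subsequent topology evaluations are checked.
 */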
void __init cpu_init_topology(struct cpuinfo_x86 *c)
{
	struct topo_scan tscan = { .c = c, };
	unsigned int dom, sft;

	parse_topology(&tscan, true);

	/* Copy the shift values and calculate the unit sizes. */
	memcpy(x86_topo_system.dom_shifts, tscan.dom_shifts,
	       sizeof(x86_topo_system.dom_shifts));

	dom = TOPO_SMT_DOMAIN;
	x86_topo_system.dom_size[dom] = 1U << x86_topo_system.dom_shifts[dom];

	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		sft = x86_topo_system.dom_shifts[dom] - x86_topo_system.dom_shifts[dom - 1];
		x86_topo_system.dom_size[dom] = 1U << sft;
	}

	topo_set_ids(&tscan, true);

	/*
	 * AMD systems have Nodes per package which cannot be mapped to
	 * APIC ID.
	 */
	__amd_nodes_per_pkg = tscan.amd_nodes_per_pkg;
}