// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
#include <linux/io.h>

#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>

#include "cpu.h"

#define APICID_SOCKET_ID_BIT 6

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void hygon_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}
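
/*
 * Illustrative decode of the leaf used above (EBX value assumed, not
 * taken from real hardware): with CPUID 0x8000001e EBX = 0x00000105,
 * bits [15:8] give ThreadsPerCore = 1, so smp_num_siblings becomes
 * 1 + 1 = 2, i.e. SMT is enabled.
 */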

/*
 * Fixup core topology information for
 * (1) Hygon multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) Hygon processors supporting compute units
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		c->topo.die_id = ecx & 0xff;

		c->topo.core_id = ebx & 0xff;

		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/*
		 * Socket ID is ApicId[6] for the processors with model <= 0x3
		 * when running on host.
		 */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
			c->topo.pkg_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->topo.die_id = value & 7;
		c->topo.llc_id = c->topo.die_id;
	} else
		return;

	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}

/*
 * On Hygon setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void hygon_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned int bits;

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->topo.pkg_id = c->topo.initial_apicid >> bits;
	/* Use package ID also for last level cache */
	c->topo.llc_id = c->topo.die_id = c->topo.pkg_id;
}
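
/*
 * Worked example for the decomposition above (numbers assumed for
 * illustration): with c->x86_coreid_bits == 6 and an initial APIC ID of
 * 0x45, core_id is 0x45 & 0x3f = 5 and pkg_id is 0x45 >> 6 = 1.
 */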

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = c->topo.llc_id;

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
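
/*
 * Illustrative decode (ECX value assumed): with CPUID 0x80000008
 * ECX = 0x0000603f, x86_max_cores = (0x3f & 0xff) + 1 = 64 and the
 * reported core id shift is (0x603f >> 12) & 0xf = 6, which is
 * consistent since 1 << 6 == 64.
 */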

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			/* Bit 10 of LS_CFG controls SSBD on these Zen-based parts */
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}

static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a
	 * constant rate with P/T states and does not stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC, so
	 * we can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}

static void init_hygon(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->topo.apicid = read_apic_id();

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Hygon processors have APIC timer running in deep C states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);

	/* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
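
/*
 * Illustrative decode (EBX value assumed): with CPUID 0x80000006
 * EBX = 0xc600c200, the L2 dTLB has (0xc600c200 >> 16) & 0xfff =
 * 0x600 = 1536 4K entries and the L2 iTLB has 0xc200 & 0xfff =
 * 0x200 = 512 4K entries.
 */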

static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init	= early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);
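
/*
 * Note on registration: cpu_dev_register() (a macro in cpu.h in this
 * directory) places a pointer to hygon_cpu_dev in the .x86_cpu_dev.init
 * section. The boot-time CPU identification code walks that section,
 * matches the "HygonGenuine" c_ident string against the CPUID vendor
 * string, and then invokes the callbacks above at the corresponding
 * stages of CPU setup.
 */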