// SPDX-License-Identifier: GPL-2.0

#include <linux/sched.h>
#include <linux/sched/clock.h>

#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "cpu.h"

#define ACE_PRESENT (1 << 6)
#define ACE_ENABLED (1 << 7)
#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */

#define RNG_PRESENT (1 << 2)
#define RNG_ENABLED (1 << 3)
#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */

#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020

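/*
 * Family 6 (VIA C3 and later) setup. When the Centaur extended CPUID leaf
 * 0xC0000001 exists, enable the ACE hardware crypto unit and the hardware
 * RNG if they are reported present but not yet enabled (the
 * "(PRESENT | ENABLED) == PRESENT" test below), and store the extended
 * feature flags in the capability array.
 */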
static void init_c3(struct cpuinfo_x86 *c)
{
        u32 lo, hi;

        /* Test for Centaur Extended Feature Flags presence */
        if (cpuid_eax(0xC0000000) >= 0xC0000001) {
                u32 tmp = cpuid_edx(0xC0000001);

                /* Enable the ACE unit, if present and disabled. */
                if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
                        rdmsr(MSR_VIA_FCR, lo, hi);
                        lo |= ACE_FCR; /* enable ACE unit */
                        wrmsr(MSR_VIA_FCR, lo, hi);
                        pr_info("CPU: Enabled ACE h/w crypto\n");
                }

                /* Enable the RNG unit, if present and disabled. */
                if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
                        rdmsr(MSR_VIA_RNG, lo, hi);
                        lo |= RNG_ENABLE; /* enable RNG unit */
                        wrmsr(MSR_VIA_RNG, lo, hi);
                        pr_info("CPU: Enabled h/w RNG\n");
                }

                /*
                 * Store the Centaur Extended Feature Flags as word 5 of the
                 * CPU capability bit array.
                 */
                c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
        }
#ifdef CONFIG_X86_32
        /* Cyrix III family needs CX8 & PGE explicitly enabled. */
        if (c->x86_model >= 6 && c->x86_model <= 13) {
                rdmsr(MSR_VIA_FCR, lo, hi);
                lo |= (1<<1 | 1<<7); /* bit 1: enable CX8, bit 7: enable PGE */
                wrmsr(MSR_VIA_FCR, lo, hi);
                set_cpu_cap(c, X86_FEATURE_CX8);
        }

        /* Before Nehemiah, the C3s had 3DNow! */
        if (c->x86_model >= 6 && c->x86_model < 9)
                set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
        if (c->x86 == 0x6 && c->x86_model >= 0xf) {
                c->x86_cache_alignment = c->x86_clflush_size * 2;
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }

        cpu_detect_cache_sizes(c);
}

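/*
 * Bit definitions for MSR_IDT_FCR1, the WinChip (family 5) feature control
 * register. Going by the names, the E* bits enable a feature and the D* bits
 * disable one; init_centaur() sets and clears them per model below.
 */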
enum {
        ECX8 = 1<<1,
        EIERRINT = 1<<2,
        DPM = 1<<3,
        DMCE = 1<<4,
        DSTPCLK = 1<<5,
        ELINEAR = 1<<6,
        DSMC = 1<<7,
        DTLOCK = 1<<8,
        EDCTLB = 1<<8,
        EMMX = 1<<9,
        DPDC = 1<<11,
        EBRPRED = 1<<12,
        DIC = 1<<13,
        DDC = 1<<14,
        DNA = 1<<15,
        ERETSTK = 1<<16,
        E2MMX = 1<<19,
        EAMD3D = 1<<20,
};

static void early_init_centaur(struct cpuinfo_x86 *c)
{
        switch (c->x86) {
#ifdef CONFIG_X86_32
        case 5:
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
                break;
#endif
        case 6:
                if (c->x86_model >= 0xf)
                        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                break;
        }
#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
        /* Bit 8 of CPUID 0x80000007 EDX: invariant (non-stop) TSC. */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }
}

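/*
 * Translate the VMX capability MSRs into feature bits. The high dword of
 * MSR_IA32_VMX_PROCBASED_CTLS (and of _CTLS2) holds the allowed-1 settings,
 * i.e. which execution controls this CPU supports turning on; those are the
 * bits the checks below test for.
 */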
static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;

        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

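/*
 * Main CPU setup. Family 5 parts (WinChip) get per-model FCR1 tweaks plus
 * MCR-based MTRR emulation; family 6 parts (C3 and later) are handled by
 * init_c3() above.
 */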
static void init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        char *name;
        u32 fcr_set = 0;
        u32 fcr_clr = 0;
        u32 lo, hi, newlo;
        u32 aa, bb, cc, dd;

        /*
         * Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
         * 3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway.
         */
        clear_cpu_cap(c, 0*32+31);
#endif
        early_init_centaur(c);
        init_intel_cacheinfo(c);
        detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
        detect_ht(c);
#endif

        if (c->cpuid_level > 9) {
                unsigned int eax = cpuid_eax(10);

                /*
                 * Check the version and the number of counters:
                 * the version (eax[7:0]) must be non-zero and there
                 * must be more than one counter (eax[15:8]).
                 */
                if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        switch (c->x86) {
#ifdef CONFIG_X86_32
        case 5:
                switch (c->x86_model) {
                case 4:
                        name = "C6";
                        fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
                        fcr_clr = DPDC;
                        pr_notice("Disabling bugged TSC.\n");
                        clear_cpu_cap(c, X86_FEATURE_TSC);
                        break;
                case 8:
                        switch (c->x86_stepping) {
                        default:
                                name = "2";
                                break;
                        case 7 ... 9:
                                name = "2A";
                                break;
                        case 10 ... 15:
                                name = "2B";
                                break;
                        }
                        fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
                                  E2MMX|EAMD3D;
                        fcr_clr = DPDC;
                        break;
                case 9:
                        name = "3";
                        fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
                                  E2MMX|EAMD3D;
                        fcr_clr = DPDC;
                        break;
                default:
                        name = "??";
                }

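                /*
                 * Apply the per-model FCR1 adjustments: set the requested
                 * feature bits, clear the ones marked for disabling, and
                 * only write the MSR back if the value actually changed.
                 */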
                rdmsr(MSR_IDT_FCR1, lo, hi);
                newlo = (lo|fcr_set) & (~fcr_clr);

                if (newlo != lo) {
                        pr_info("Centaur FCR was 0x%X now 0x%X\n",
                                lo, newlo);
                        wrmsr(MSR_IDT_FCR1, newlo, hi);
                } else {
                        pr_info("Centaur FCR is 0x%X\n", lo);
                }
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
                /* Report CX8 */
                set_cpu_cap(c, X86_FEATURE_CX8);
                /* Set 3DNow! on WinChip 2 and above. */
                if (c->x86_model >= 8)
                        set_cpu_cap(c, X86_FEATURE_3DNOW);
                /* See if the extended CPUID leaves can tell us more. */
                if (cpuid_eax(0x80000000) >= 0x80000005) {
                        cpuid(0x80000005, &aa, &bb, &cc, &dd);
                        /* Add the L1 data and code cache sizes (KB). */
                        c->x86_cache_size = (cc>>24)+(dd>>24);
                }
                sprintf(c->x86_model_id, "WinChip %s", name);
                break;
#endif
        case 6:
                init_c3(c);
                break;
        }
#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif

        if (cpu_has(c, X86_FEATURE_VMX))
                centaur_detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
static unsigned int
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
                size >>= 8;

        /*
         * There's also an erratum in Nehemiah stepping 1, which
         * returns '65KB' instead of '64KB'
         * - Note, it seems this may only be in engineering samples.
         */
        if ((c->x86 == 6) && (c->x86_model == 9) &&
            (c->x86_stepping == 1) && (size == 65))
                size -= 1;
        return size;
}
#endif

static const struct cpu_dev centaur_cpu_dev = {
        .c_vendor = "Centaur",
        .c_ident = { "CentaurHauls" },
        .c_early_init = early_init_centaur,
        .c_init = init_centaur,
#ifdef CONFIG_X86_32
        .legacy_cache_size = centaur_size_cache,
#endif
        .c_x86_vendor = X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);

// SPDX-License-Identifier: GPL-2.0

#include <linux/sched.h>
#include <linux/sched/clock.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "cpu.h"

#define ACE_PRESENT (1 << 6)
#define ACE_ENABLED (1 << 7)
#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */

#define RNG_PRESENT (1 << 2)
#define RNG_ENABLED (1 << 3)
#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */

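/*
 * Probe the Centaur extended CPUID leaf 0xC0000001; when the ACE crypto and
 * RNG units are present but still disabled, switch them on, and publish the
 * extended feature flags as capability word CPUID_C000_0001_EDX.
 */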
static void init_c3(struct cpuinfo_x86 *c)
{
        u32 lo, hi;

        /* Test for Centaur Extended Feature Flags presence */
        if (cpuid_eax(0xC0000000) >= 0xC0000001) {
                u32 tmp = cpuid_edx(0xC0000001);

                /* Enable the ACE unit, if present and disabled. */
                if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
                        rdmsr(MSR_VIA_FCR, lo, hi);
                        lo |= ACE_FCR; /* enable ACE unit */
                        wrmsr(MSR_VIA_FCR, lo, hi);
                        pr_info("CPU: Enabled ACE h/w crypto\n");
                }

                /* Enable the RNG unit, if present and disabled. */
                if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
                        rdmsr(MSR_VIA_RNG, lo, hi);
                        lo |= RNG_ENABLE; /* enable RNG unit */
                        wrmsr(MSR_VIA_RNG, lo, hi);
                        pr_info("CPU: Enabled h/w RNG\n");
                }

                /*
                 * Store the Centaur Extended Feature Flags as word 5 of the
                 * CPU capability bit array.
                 */
                c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
        }
#ifdef CONFIG_X86_32
        /* Cyrix III family needs CX8 & PGE explicitly enabled. */
        if (c->x86_model >= 6 && c->x86_model <= 13) {
                rdmsr(MSR_VIA_FCR, lo, hi);
                lo |= (1<<1 | 1<<7); /* bit 1: enable CX8, bit 7: enable PGE */
                wrmsr(MSR_VIA_FCR, lo, hi);
                set_cpu_cap(c, X86_FEATURE_CX8);
        }

        /* Before Nehemiah, the C3s had 3DNow! */
        if (c->x86_model >= 6 && c->x86_model < 9)
                set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
        if (c->x86 == 0x6 && c->x86_model >= 0xf) {
                c->x86_cache_alignment = c->x86_clflush_size * 2;
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }
}

enum {
        ECX8 = 1<<1,
        EIERRINT = 1<<2,
        DPM = 1<<3,
        DMCE = 1<<4,
        DSTPCLK = 1<<5,
        ELINEAR = 1<<6,
        DSMC = 1<<7,
        DTLOCK = 1<<8,
        EDCTLB = 1<<8,
        EMMX = 1<<9,
        DPDC = 1<<11,
        EBRPRED = 1<<12,
        DIC = 1<<13,
        DDC = 1<<14,
        DNA = 1<<15,
        ERETSTK = 1<<16,
        E2MMX = 1<<19,
        EAMD3D = 1<<20,
};

static void early_init_centaur(struct cpuinfo_x86 *c)
{
        switch (c->x86) {
#ifdef CONFIG_X86_32
        case 5:
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
                break;
#endif
        case 6:
                if (c->x86_model >= 0xf)
                        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                break;
        }
#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }
}

static void init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        char *name;
        u32 fcr_set = 0;
        u32 fcr_clr = 0;
        u32 lo, hi, newlo;
        u32 aa, bb, cc, dd;

        /*
         * Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
         * 3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway.
         */
        clear_cpu_cap(c, 0*32+31);
#endif
        early_init_centaur(c);
        init_intel_cacheinfo(c);
        detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
        detect_ht(c);
#endif

        if (c->cpuid_level > 9) {
                unsigned int eax = cpuid_eax(10);

                /*
                 * Check the version and the number of counters:
                 * the version (eax[7:0]) must be non-zero and there
                 * must be more than one counter (eax[15:8]).
                 */
                if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        switch (c->x86) {
#ifdef CONFIG_X86_32
        case 5:
                switch (c->x86_model) {
                case 4:
                        name = "C6";
                        fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
                        fcr_clr = DPDC;
                        pr_notice("Disabling bugged TSC.\n");
                        clear_cpu_cap(c, X86_FEATURE_TSC);
                        break;
                case 8:
                        switch (c->x86_stepping) {
                        default:
                                name = "2";
                                break;
                        case 7 ... 9:
                                name = "2A";
                                break;
                        case 10 ... 15:
                                name = "2B";
                                break;
                        }
                        fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
                                  E2MMX|EAMD3D;
                        fcr_clr = DPDC;
                        break;
                case 9:
                        name = "3";
                        fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
                                  E2MMX|EAMD3D;
                        fcr_clr = DPDC;
                        break;
                default:
                        name = "??";
                }

                rdmsr(MSR_IDT_FCR1, lo, hi);
                newlo = (lo|fcr_set) & (~fcr_clr);

                if (newlo != lo) {
                        pr_info("Centaur FCR was 0x%X now 0x%X\n",
                                lo, newlo);
                        wrmsr(MSR_IDT_FCR1, newlo, hi);
                } else {
                        pr_info("Centaur FCR is 0x%X\n", lo);
                }
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
                /* Report CX8 */
                set_cpu_cap(c, X86_FEATURE_CX8);
                /* Set 3DNow! on WinChip 2 and above. */
                if (c->x86_model >= 8)
                        set_cpu_cap(c, X86_FEATURE_3DNOW);
                /* See if the extended CPUID leaves can tell us more. */
                if (cpuid_eax(0x80000000) >= 0x80000005) {
                        cpuid(0x80000005, &aa, &bb, &cc, &dd);
                        /* Add the L1 data and code cache sizes (KB). */
                        c->x86_cache_size = (cc>>24)+(dd>>24);
                }
                sprintf(c->x86_model_id, "WinChip %s", name);
                break;
#endif
        case 6:
                init_c3(c);
                break;
        }
#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif

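        /* Common code enumerates VMX and configures/locks MSR_IA32_FEAT_CTL. */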
        init_ia32_feat_ctl(c);
}

#ifdef CONFIG_X86_32
static unsigned int
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
                size >>= 8;

        /*
         * There's also an erratum in Nehemiah stepping 1, which
         * returns '65KB' instead of '64KB'
         * - Note, it seems this may only be in engineering samples.
         */
        if ((c->x86 == 6) && (c->x86_model == 9) &&
            (c->x86_stepping == 1) && (size == 65))
                size -= 1;
        return size;
}
#endif

static const struct cpu_dev centaur_cpu_dev = {
        .c_vendor = "Centaur",
        .c_ident = { "CentaurHauls" },
        .c_early_init = early_init_centaur,
        .c_init = init_centaur,
#ifdef CONFIG_X86_32
        .legacy_cache_size = centaur_size_cache,
#endif
        .c_x86_vendor = X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);