/* arch/x86 Centaur/VIA CPU setup (two revisions of the same file follow) */
1#include <linux/bitops.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4
5#include <asm/processor.h>
6#include <asm/e820.h>
7#include <asm/mtrr.h>
8#include <asm/msr.h>
9
10#include "cpu.h"
11
12#ifdef CONFIG_X86_OOSTORE
13
14static u32 __cpuinit power2(u32 x)
15{
16 u32 s = 1;
17
18 while (s <= x)
19 s <<= 1;
20
21 return s >>= 1;
22}
23
24
25/*
26 * Set up an actual MCR
27 */
28static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
29{
30 u32 lo, hi;
31
32 hi = base & ~0xFFF;
33 lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
34 lo &= ~0xFFF; /* Remove the ctrl value bits */
35 lo |= key; /* Attribute we wish to set */
36 wrmsr(reg+MSR_IDT_MCR0, lo, hi);
37 mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
38}
39
40/*
41 * Figure what we can cover with MCR's
42 *
43 * Shortcut: We know you can't put 4Gig of RAM on a winchip
44 */
/*
 * Figure what we can cover with MCR's
 *
 * Shortcut: We know you can't put 4Gig of RAM on a winchip
 *
 * Walks the e820 map and returns the top of usable RAM, clipped to
 * the lowest reserved region found above 1Mb.
 */
static u32 __cpuinit ramtop(void)
{
	u32 clip = 0xFFFFFFFFUL;	/* lowest reserved address above 1Mb seen so far */
	u32 top = 0;			/* highest end of a usable region seen so far */
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;

		/* A 32-bit MCR cannot describe anything above 4Gig */
		if (e820.map[i].addr > 0xFFFFFFFFUL)
			continue;
		/*
		 * Don't MCR over reserved space. Ignore the ISA hole
		 * we frob around that catastrophe already
		 */
		if (e820.map[i].type == E820_RESERVED) {
			if (e820.map[i].addr >= 0x100000UL &&
			    e820.map[i].addr < clip)
				clip = e820.map[i].addr;
			continue;
		}
		start = e820.map[i].addr;
		end = e820.map[i].addr + e820.map[i].size;
		if (start >= end)
			continue;	/* empty or wrapped region */
		if (end > top)
			top = end;
	}
	/*
	 * Everything below 'top' should be RAM except for the ISA hole.
	 * Because of the limited MCR's we want to map NV/ACPI into our
	 * MCR range for gunk in RAM
	 *
	 * Clip might cause us to MCR insufficient RAM but that is an
	 * acceptable failure mode and should only bite obscure boxes with
	 * a VESA hole at 15Mb
	 *
	 * The second case Clip sometimes kicks in is when the EBDA is marked
	 * as reserved. Again we fail safe with reasonable results
	 */
	if (top > clip)
		top = clip;

	return top;
}
90
91/*
92 * Compute a set of MCR's to give maximum coverage
93 */
/*
 * Compute a set of MCR's to give maximum coverage
 *
 * Greedily loads up to 'nr' MCRs with attribute 'key'.  Coverage grows
 * in three directions each iteration: upwards from 'top' towards the
 * top of RAM, downwards from 'base' towards 1Mb, and upwards from
 * 'floor' inside the sub-640Kb area; the largest candidate block wins.
 * Returns the number of MCRs loaded; the caller must still set the
 * MCR valid mask.
 */
static int __cpuinit centaur_mcr_compute(int nr, int key)
{
	u32 mem = ramtop();		/* top of usable RAM, see ramtop() */
	u32 root = power2(mem);		/* largest power-of-2 block inside RAM */
	u32 base = root;		/* bottom of covered region, grows down */
	u32 top = root;			/* top of covered region, grows up */
	u32 floor = 0;			/* coverage below the ISA hole, grows up */
	int ct = 0;			/* MCRs consumed so far */

	while (ct < nr) {
		u32 fspace = 0;
		u32 high;
		u32 low;

		/*
		 * Find the largest block we will fill going upwards
		 */
		high = power2(mem-top);

		/*
		 * Find the largest block we will fill going downwards
		 */
		low = base/2;

		/*
		 * Don't fill below 1Mb going downwards as there
		 * is an ISA hole in the way.
		 */
		if (base <= 1024*1024)
			low = 0;

		/*
		 * See how much space we could cover by filling below
		 * the ISA hole
		 */

		if (floor == 0)
			fspace = 512*1024;	/* first step: 0-512Kb */
		else if (floor == 512*1024)
			fspace = 128*1024;	/* second step: 512-640Kb */

		/* And forget ROM space */

		/*
		 * Now install the largest coverage we get
		 */
		if (fspace > high && fspace > low) {
			centaur_mcr_insert(ct, floor, fspace, key);
			floor += fspace;
		} else if (high > low) {
			centaur_mcr_insert(ct, top, high, key);
			top += high;
		} else if (low > 0) {
			base -= low;
			centaur_mcr_insert(ct, base, low, key);
		} else
			break;	/* nothing left worth an MCR */
		ct++;
	}
	/*
	 * We loaded ct values. We now need to set the mask. The caller
	 * must do this bit.
	 */
	return ct;
}
159
160static void __cpuinit centaur_create_optimal_mcr(void)
161{
162 int used;
163 int i;
164
165 /*
166 * Allocate up to 6 mcrs to mark as much of ram as possible
167 * as write combining and weak write ordered.
168 *
169 * To experiment with: Linux never uses stack operations for
170 * mmio spaces so we could globally enable stack operation wc
171 *
172 * Load the registers with type 31 - full write combining, all
173 * writes weakly ordered.
174 */
175 used = centaur_mcr_compute(6, 31);
176
177 /*
178 * Wipe unused MCRs
179 */
180 for (i = used; i < 8; i++)
181 wrmsr(MSR_IDT_MCR0+i, 0, 0);
182}
183
184static void __cpuinit winchip2_create_optimal_mcr(void)
185{
186 u32 lo, hi;
187 int used;
188 int i;
189
190 /*
191 * Allocate up to 6 mcrs to mark as much of ram as possible
192 * as write combining, weak store ordered.
193 *
194 * Load the registers with type 25
195 * 8 - weak write ordering
196 * 16 - weak read ordering
197 * 1 - write combining
198 */
199 used = centaur_mcr_compute(6, 25);
200
201 /*
202 * Mark the registers we are using.
203 */
204 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
205 for (i = 0; i < used; i++)
206 lo |= 1<<(9+i);
207 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
208
209 /*
210 * Wipe unused MCRs
211 */
212
213 for (i = used; i < 8; i++)
214 wrmsr(MSR_IDT_MCR0+i, 0, 0);
215}
216
217/*
218 * Handle the MCR key on the Winchip 2.
219 */
220static void __cpuinit winchip2_unprotect_mcr(void)
221{
222 u32 lo, hi;
223 u32 key;
224
225 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
226 lo &= ~0x1C0; /* blank bits 8-6 */
227 key = (lo>>17) & 7;
228 lo |= key<<6; /* replace with unlock key */
229 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
230}
231
232static void __cpuinit winchip2_protect_mcr(void)
233{
234 u32 lo, hi;
235
236 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
237 lo &= ~0x1C0; /* blank bits 8-6 */
238 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
239}
240#endif /* CONFIG_X86_OOSTORE */
241
242#define ACE_PRESENT (1 << 6)
243#define ACE_ENABLED (1 << 7)
244#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
245
246#define RNG_PRESENT (1 << 2)
247#define RNG_ENABLED (1 << 3)
248#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
249
/*
 * VIA C3 (family 6) setup: enable the ACE crypto unit and hardware
 * RNG when the Centaur extended CPUID leaf reports them present but
 * disabled, apply Cyrix III FCR fixups, and detect cache sizes.
 */
static void __cpuinit init_c3(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_VIA_FCR, lo, hi);
			lo |= ACE_FCR;		/* enable ACE unit */
			wrmsr(MSR_VIA_FCR, lo, hi);
			printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_VIA_RNG, lo, hi);
			lo |= RNG_ENABLE;	/* enable RNG unit */
			wrmsr(MSR_VIA_RNG, lo, hi);
			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
		}

		/* store Centaur Extended Feature Flags as
		 * word 5 of the CPU capability bit array
		 * (re-read: the enable bits set above may now be visible)
		 */
		c->x86_capability[5] = cpuid_edx(0xC0000001);
	}
#ifdef CONFIG_X86_32
	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
	if (c->x86_model >= 6 && c->x86_model <= 13) {
		rdmsr(MSR_VIA_FCR, lo, hi);
		lo |= (1<<1 | 1<<7);	/* FCR bits for CX8 and PGE */
		wrmsr(MSR_VIA_FCR, lo, hi);
		set_cpu_cap(c, X86_FEATURE_CX8);
	}

	/* Before Nehemiah, the C3's had 3dNOW! */
	if (c->x86_model >= 6 && c->x86_model < 9)
		set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
	/* Model 0xf and later: double cache alignment, fast REP string ops */
	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}

	cpu_detect_cache_sizes(c);
}
299
/*
 * Bit names for MSR_IDT_FCR1, combined below into fcr_set/fcr_clr.
 * NOTE(review): judging by usage the E* bits appear to be enables and
 * the D* bits disables -- confirm against the IDT/Centaur datasheet.
 * DTLOCK and EDCTLB share the value 1<<8; verify that is intentional.
 */
enum {
	ECX8 = 1<<1,
	EIERRINT = 1<<2,
	DPM = 1<<3,
	DMCE = 1<<4,
	DSTPCLK = 1<<5,
	ELINEAR = 1<<6,
	DSMC = 1<<7,
	DTLOCK = 1<<8,
	EDCTLB = 1<<8,
	EMMX = 1<<9,
	DPDC = 1<<11,
	EBRPRED = 1<<12,
	DIC = 1<<13,
	DDC = 1<<14,
	DNA = 1<<15,
	ERETSTK = 1<<16,
	E2MMX = 1<<19,
	EAMD3D = 1<<20,
};
320
321static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
322{
323 switch (c->x86) {
324#ifdef CONFIG_X86_32
325 case 5:
326 /* Emulate MTRRs using Centaur's MCR. */
327 set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
328 break;
329#endif
330 case 6:
331 if (c->x86_model >= 0xf)
332 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
333 break;
334 }
335#ifdef CONFIG_X86_64
336 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
337#endif
338}
339
/*
 * Main Centaur CPU setup.  Family 5 (WinChip C6/2/3) gets per-model
 * FCR1 tweaks, optional MCR programming (CONFIG_X86_OOSTORE) and its
 * model name; family 6 (C3) is handed to init_c3().
 */
static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	char *name;		/* WinChip model-name suffix for x86_model_id */
	u32 fcr_set = 0;	/* FCR1 bits to set for this model */
	u32 fcr_clr = 0;	/* FCR1 bits to clear for this model */
	u32 lo, hi, newlo;
	u32 aa, bb, cc, dd;

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);
#endif
	early_init_centaur(c);
	switch (c->x86) {
#ifdef CONFIG_X86_32
	case 5:
		switch (c->x86_model) {
		case 4:
			name = "C6";
			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
			fcr_clr = DPDC;
			printk(KERN_NOTICE "Disabling bugged TSC.\n");
			clear_cpu_cap(c, X86_FEATURE_TSC);
#ifdef CONFIG_X86_OOSTORE
			centaur_create_optimal_mcr();
			/*
			 * Enable:
			 * write combining on non-stack, non-string
			 * write combining on string, all types
			 * weak write ordering
			 *
			 * The C6 original lacks weak read order
			 *
			 * Note 0x120 is write only on Winchip 1
			 */
			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
#endif
			break;
		case 8:
			/* WinChip 2: stepping selects the revision name */
			switch (c->x86_mask) {
			default:
			name = "2";
				break;
			case 7 ... 9:
				name = "2A";
				break;
			case 10 ... 15:
				name = "2B";
				break;
			}
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
				  E2MMX|EAMD3D;
			fcr_clr = DPDC;
#ifdef CONFIG_X86_OOSTORE
			/* MCRs are key-protected on Winchip 2: unlock first */
			winchip2_unprotect_mcr();
			winchip2_create_optimal_mcr();
			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
			/*
			 * Enable:
			 * write combining on non-stack, non-string
			 * write combining on string, all types
			 * weak write ordering
			 */
			lo |= 31;
			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
			winchip2_protect_mcr();
#endif
			break;
		case 9:
			name = "3";
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
				  E2MMX|EAMD3D;
			fcr_clr = DPDC;
#ifdef CONFIG_X86_OOSTORE
			/* Same MCR dance as the Winchip 2 above */
			winchip2_unprotect_mcr();
			winchip2_create_optimal_mcr();
			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
			/*
			 * Enable:
			 * write combining on non-stack, non-string
			 * write combining on string, all types
			 * weak write ordering
			 */
			lo |= 31;
			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
			winchip2_protect_mcr();
#endif
			break;
		default:
			name = "??";
		}

		/* Apply the per-model FCR1 set/clear masks chosen above */
		rdmsr(MSR_IDT_FCR1, lo, hi);
		newlo = (lo|fcr_set) & (~fcr_clr);

		if (newlo != lo) {
			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
				lo, newlo);
			wrmsr(MSR_IDT_FCR1, newlo, hi);
		} else {
			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
		}
		/* Emulate MTRRs using Centaur's MCR. */
		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
		/* Report CX8 */
		set_cpu_cap(c, X86_FEATURE_CX8);
		/* Set 3DNow! on Winchip 2 and above. */
		if (c->x86_model >= 8)
			set_cpu_cap(c, X86_FEATURE_3DNOW);
		/* See if we can find out some more. */
		if (cpuid_eax(0x80000000) >= 0x80000005) {
			/* Yes, we can. */
			cpuid(0x80000005, &aa, &bb, &cc, &dd);
			/* Add L1 data and code cache sizes. */
			c->x86_cache_size = (cc>>24)+(dd>>24);
		}
		sprintf(c->x86_model_id, "WinChip %s", name);
		break;
#endif
	case 6:
		init_c3(c);
		break;
	}
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif
}
470
471static unsigned int __cpuinit
472centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
473{
474#ifdef CONFIG_X86_32
475 /* VIA C3 CPUs (670-68F) need further shifting. */
476 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
477 size >>= 8;
478
479 /*
480 * There's also an erratum in Nehemiah stepping 1, which
481 * returns '65KB' instead of '64KB'
482 * - Note, it seems this may only be in engineering samples.
483 */
484 if ((c->x86 == 6) && (c->x86_model == 9) &&
485 (c->x86_mask == 1) && (size == 65))
486 size -= 1;
487#endif
488 return size;
489}
490
/* Centaur vendor hooks, registered with the generic x86 CPU setup code */
static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
	.c_vendor = "Centaur",
	.c_ident = { "CentaurHauls" },	/* CPUID vendor ID string */
	.c_early_init = early_init_centaur,
	.c_init = init_centaur,
	.c_size_cache = centaur_size_cache,
	.c_x86_vendor = X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/sched.h>
4#include <linux/sched/clock.h>
5
6#include <asm/cpu.h>
7#include <asm/cpufeature.h>
8#include <asm/e820/api.h>
9#include <asm/mtrr.h>
10#include <asm/msr.h>
11
12#include "cpu.h"
13
14#define ACE_PRESENT (1 << 6)
15#define ACE_ENABLED (1 << 7)
16#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
17
18#define RNG_PRESENT (1 << 2)
19#define RNG_ENABLED (1 << 3)
20#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
21
/*
 * VIA C3 and newer setup: enable the ACE crypto unit and hardware RNG
 * when the Centaur extended CPUID leaf reports them present but
 * disabled, plus Cyrix III FCR fixups and REP_GOOD/cache tweaks.
 */
static void init_c3(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_VIA_FCR, lo, hi);
			lo |= ACE_FCR;		/* enable ACE unit */
			wrmsr(MSR_VIA_FCR, lo, hi);
			pr_info("CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_VIA_RNG, lo, hi);
			lo |= RNG_ENABLE;	/* enable RNG unit */
			wrmsr(MSR_VIA_RNG, lo, hi);
			pr_info("CPU: Enabled h/w RNG\n");
		}

		/* store Centaur Extended Feature Flags as
		 * word 5 of the CPU capability bit array
		 * (re-read: the enable bits set above may now be visible)
		 */
		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
	}
#ifdef CONFIG_X86_32
	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
	if (c->x86_model >= 6 && c->x86_model <= 13) {
		rdmsr(MSR_VIA_FCR, lo, hi);
		lo |= (1<<1 | 1<<7);	/* FCR bits for CX8 and PGE */
		wrmsr(MSR_VIA_FCR, lo, hi);
		set_cpu_cap(c, X86_FEATURE_CX8);
	}

	/* Before Nehemiah, the C3's had 3dNOW! */
	if (c->x86_model >= 6 && c->x86_model < 9)
		set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
	/* Family 6, model 0xf and later: double cache alignment, REP_GOOD */
	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}

	/* Family 7 and later always get REP_GOOD */
	if (c->x86 >= 7)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
72
/*
 * Bit names for MSR_IDT_FCR1, combined below into fcr_set/fcr_clr.
 * NOTE(review): judging by usage the E* bits appear to be enables and
 * the D* bits disables -- confirm against the IDT/Centaur datasheet.
 * DTLOCK and EDCTLB share the value 1<<8; verify that is intentional.
 */
enum {
	ECX8 = 1<<1,
	EIERRINT = 1<<2,
	DPM = 1<<3,
	DMCE = 1<<4,
	DSTPCLK = 1<<5,
	ELINEAR = 1<<6,
	DSMC = 1<<7,
	DTLOCK = 1<<8,
	EDCTLB = 1<<8,
	EMMX = 1<<9,
	DPDC = 1<<11,
	EBRPRED = 1<<12,
	DIC = 1<<13,
	DDC = 1<<14,
	DNA = 1<<15,
	ERETSTK = 1<<16,
	E2MMX = 1<<19,
	EAMD3D = 1<<20,
};
93
94static void early_init_centaur(struct cpuinfo_x86 *c)
95{
96#ifdef CONFIG_X86_32
97 /* Emulate MTRRs using Centaur's MCR. */
98 if (c->x86 == 5)
99 set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
100#endif
101 if ((c->x86 == 6 && c->x86_model >= 0xf) ||
102 (c->x86 >= 7))
103 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
104
105#ifdef CONFIG_X86_64
106 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
107#endif
108 if (c->x86_power & (1 << 8)) {
109 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
110 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
111 }
112}
113
/*
 * Main Centaur CPU setup.  Detects caches/cores/perfmon, applies the
 * per-model WinChip FCR1 tweaks on 32-bit family 5, and hands family
 * 6 and newer to init_c3().
 */
static void init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	char *name;		/* WinChip model-name suffix for x86_model_id */
	u32 fcr_set = 0;	/* FCR1 bits to set for this model */
	u32 fcr_clr = 0;	/* FCR1 bits to clear for this model */
	u32 lo, hi, newlo;
	u32 aa, bb, cc, dd;

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);
#endif
	early_init_centaur(c);
	init_intel_cacheinfo(c);
	detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	/* Architectural perfmon leaf (0xa), same layout as on Intel */
	if (c->cpuid_level > 9) {
		unsigned int eax = cpuid_eax(10);

		/*
		 * Check for version and the number of counters
		 * Version(eax[7:0]) can't be 0;
		 * Counters(eax[15:8]) should be greater than 1;
		 */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

#ifdef CONFIG_X86_32
	if (c->x86 == 5) {
		switch (c->x86_model) {
		case 4:
			name = "C6";
			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
			fcr_clr = DPDC;
			pr_notice("Disabling bugged TSC.\n");
			clear_cpu_cap(c, X86_FEATURE_TSC);
			break;
		case 8:
			/* WinChip 2: stepping selects the revision name */
			switch (c->x86_stepping) {
			default:
				name = "2";
				break;
			case 7 ... 9:
				name = "2A";
				break;
			case 10 ... 15:
				name = "2B";
				break;
			}
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
				  E2MMX|EAMD3D;
			fcr_clr = DPDC;
			break;
		case 9:
			name = "3";
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
				  E2MMX|EAMD3D;
			fcr_clr = DPDC;
			break;
		default:
			name = "??";
		}

		/* Apply the per-model FCR1 set/clear masks chosen above */
		rdmsr(MSR_IDT_FCR1, lo, hi);
		newlo = (lo|fcr_set) & (~fcr_clr);

		if (newlo != lo) {
			pr_info("Centaur FCR was 0x%X now 0x%X\n",
				lo, newlo);
			wrmsr(MSR_IDT_FCR1, newlo, hi);
		} else {
			pr_info("Centaur FCR is 0x%X\n", lo);
		}
		/* Emulate MTRRs using Centaur's MCR. */
		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
		/* Report CX8 */
		set_cpu_cap(c, X86_FEATURE_CX8);
		/* Set 3DNow! on Winchip 2 and above. */
		if (c->x86_model >= 8)
			set_cpu_cap(c, X86_FEATURE_3DNOW);
		/* See if we can find out some more. */
		if (cpuid_eax(0x80000000) >= 0x80000005) {
			/* Yes, we can. */
			cpuid(0x80000005, &aa, &bb, &cc, &dd);
			/* Add L1 data and code cache sizes. */
			c->x86_cache_size = (cc>>24)+(dd>>24);
		}
		sprintf(c->x86_model_id, "WinChip %s", name);
	}
#endif
	if (c->x86 == 6 || c->x86 >= 7)
		init_c3(c);
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif

	init_ia32_feat_ctl(c);
}
219
#ifdef CONFIG_X86_32
/*
 * Fix up the raw cache-size value reported by CPUID for models with
 * known reporting quirks; returns the corrected size.
 */
static unsigned int
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* VIA C3 CPUs (670-68F) report the size shifted left by 8. */
	if (c->x86 == 6 && (c->x86_model == 7 || c->x86_model == 8))
		size >>= 8;

	/*
	 * Nehemiah stepping 1 erratum: returns '65KB' instead of '64KB'.
	 * Note: this seems to show up only on engineering samples.
	 */
	if (c->x86 == 6 && c->x86_model == 9 && c->x86_stepping == 1 &&
	    size == 65)
		size = 64;
	return size;
}
#endif
239
/* Centaur vendor hooks, registered with the generic x86 CPU setup code */
static const struct cpu_dev centaur_cpu_dev = {
	.c_vendor = "Centaur",
	.c_ident = { "CentaurHauls" },	/* CPUID vendor ID string */
	.c_early_init = early_init_centaur,
	.c_init = init_centaur,
#ifdef CONFIG_X86_32
	.legacy_cache_size = centaur_size_cache,
#endif
	.c_x86_vendor = X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);