/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
#endif
#include <linux/types.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>

struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
};

#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
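/*
 * CPUID leaf 0 returns the 12-character vendor string in EBX, EDX, ECX
 * (in that order); get_flags() below stores those registers in
 * cpu_vendor[].  A32() packs four ASCII characters into the matching
 * little-endian 32-bit value so the vendor checks are plain word
 * compares.
 */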
static int is_amd(void)
{
	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
}
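/*
 * Probe for a working x87 FPU.  CR0.EM and CR0.TS are cleared first so
 * that the FPU instructions below execute instead of trapping, then
 * FNINIT is issued and the status and control words are read back: a
 * real FPU reports FSW == 0 and an FCW whose defined bits read 0x003f.
 */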
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	u32 cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
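/*
 * Test whether a given EFLAGS bit can be toggled.  This is the
 * traditional way to distinguish CPU generations before CPUID can be
 * relied on: the AC bit (bit 18) only exists on a 486 or later, and the
 * ID bit (bit 21) can only be toggled if the CPUID instruction is
 * supported.
 */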
static int has_eflag(u32 mask)
{
	u32 f0, f1;

	asm("pushfl ; "
	    "pushfl ; "
	    "popl %0 ; "
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "
	    "pushl %1 ; "
	    "popfl ; "
	    "pushfl ; "
	    "popl %1 ; "
	    "popfl"
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));

	return !!((f0^f1) & mask);
}
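/*
 * Fill in cpu.flags, cpu.level, cpu.model and cpu_vendor[] using the
 * FPU probe above plus CPUID leaves 0, 1 and 0x80000001 (when CPUID
 * and the extended leaves are available).
 */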
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		asm("cpuid"
		    : "=a" (max_amd_level)
		    : "a" (0x80000000)
		    : "ebx", "ecx", "edx");

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			u32 eax = 0x80000001;
			asm("cpuid"
			    : "+a" (eax),
			      "=c" (cpu.flags[6]),
			      "=d" (cpu.flags[1])
			    : : "ebx");
		}
	}
}

/* Returns a bitmask of which words we have error bits in */
static int check_flags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
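/*
 * Typical use (sketch only, assuming a reporting wrapper such as the
 * setup code's validate_cpu()):
 *
 *	int cpu_level, req_level;
 *	u32 *err_flags;
 *
 *	if (check_cpu(&cpu_level, &req_level, &err_flags))
 *		report the missing bits in err_flags and halt;
 *
 * The wrapper, not this file, is responsible for printing messages.
 */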
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_flags();
	err = check_flags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */
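		/* Bit 15 of MSR_K7_HWCR is the SSE-disable bit on the
		   affected AMD parts; clear it and re-read the flags. */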
		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax &= ~(1 << 15);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_flags();	/* Make sure it really did something */
		err = check_flags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_flags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */
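		/* Save the CPUID feature-mask MSR, write all ones to unmask
		   everything, re-read CPUID leaf 1, then restore the mask. */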
		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_flags();
	}

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
#endif
#include <linux/types.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "string.h"

static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
};

#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
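/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX; cpu_vendor[]
 * (filled in elsewhere by get_cpuflags()) holds those three registers,
 * and A32() builds the matching little-endian words for the compares
 * below.
 */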
static int is_amd(void)
{
	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
}

static int is_intel(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
	       cpu_vendor[2] == A32('n', 't', 'e', 'l');
}

/* Returns a bitmask of which words we have error bits in */
static int check_cpuflags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_cpuflags();
	err = check_cpuflags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */
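		/* Bit 15 of MSR_K7_HWCR is the SSE-disable bit on the
		   affected AMD parts; clear it and re-read the flags. */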
		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax &= ~(1 << 15);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */
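		/* Save the CPUID feature-mask MSR, write all ones to unmask
		   everything, re-read CPUID leaf 1, then restore the mask. */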
		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		   is_intel() && cpu.level == 6 &&
		   (cpu.model == 9 || cpu.model == 13)) {
		/* PAE is disabled on this Pentium M but can be forced */
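		/* Family 6 models 9 and 13 are the Banias/Dothan Pentium M
		   cores, which hide PAE in CPUID even though the hardware
		   supports it; "forcepae" lets the user override that. */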
		if (cmdline_find_option_bool("forcepae")) {
			puts("WARNING: Forcing PAE in CPU flags\n");
			set_bit(X86_FEATURE_PAE, cpu.flags);
			err = check_cpuflags();
		} else {
			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
		}
	}

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}