1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright 2007 rPath, Inc. - All Rights Reserved
5 *
6 * This file is part of the Linux kernel, and is made available under
7 * the terms of the GNU General Public License version 2.
8 *
9 * ----------------------------------------------------------------------- */
10
11/*
12 * Check for obligatory CPU features and abort if the features are not
13 * present. This code should be compilable as 16-, 32- or 64-bit
14 * code, so be very careful with types and inline assembly.
15 *
16 * This code should not contain any messages; that requires an
17 * additional wrapper.
18 *
19 * As written, this code is not safe for inclusion into the kernel
20 * proper (after FPU initialization, in particular).
21 */
22
23#ifdef _SETUP
24# include "boot.h"
25#endif
26#include <linux/types.h>
27#include <asm/processor-flags.h>
28#include <asm/required-features.h>
29#include <asm/msr-index.h>
30
/* Identification and feature flags of the boot CPU */
struct cpu_features cpu;
/* CPUID vendor string, as three little-endian 32-bit words (EBX, EDX, ECX) */
static u32 cpu_vendor[3];
/* Required-but-missing feature bits, one word per CPUID capability word */
static u32 err_flags[NCAPINTS];

/* Minimum CPU family the kernel was configured for */
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
36
/*
 * Feature bits the kernel cannot run without, one mask per CPUID
 * capability word.  Words whose REQUIRED_MASKn is not wired up here
 * are never mandatory for boot.
 */
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
};
48
/* Pack four characters into a little-endian 32-bit word, matching the
   layout CPUID uses to return the vendor string. */
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
50
51static int is_amd(void)
52{
53 return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
54 cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
55 cpu_vendor[2] == A32('c', 'A', 'M', 'D');
56}
57
58static int is_centaur(void)
59{
60 return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
61 cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
62 cpu_vendor[2] == A32('a', 'u', 'l', 's');
63}
64
65static int is_transmeta(void)
66{
67 return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
68 cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
69 cpu_vendor[2] == A32('M', 'x', '8', '6');
70}
71
/*
 * Probe for a working x87 FPU without taking a #NM fault.
 *
 * CR0.EM and CR0.TS are cleared first so FPU instructions execute
 * directly instead of trapping -- this is why the file header warns
 * that this code is unsafe after kernel FPU initialization.  FNINIT
 * is then issued and the status/control words are read back; a real
 * FPU leaves the status word 0 and the checked control-word bits
 * (masked with 0x103f) reading 0x003f.
 */
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;	/* deliberately invalid initial values */
	u32 cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		/* Clear emulation/task-switched bits so FPU insns don't fault */
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}

	/* Initialize the FPU, then read back status and control words */
	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
88
/*
 * Test whether the EFLAGS bits in @mask can be toggled.  Used to
 * detect CPUID support (X86_EFLAGS_ID) and a 486+ (X86_EFLAGS_AC).
 * Returns 1 if the bit(s) are writable, 0 otherwise.  The original
 * flags are restored before returning.
 */
static int has_eflag(u32 mask)
{
	u32 f0, f1;

	asm("pushfl ; "		/* save original flags */
	    "pushfl ; "
	    "popl %0 ; "	/* f0 = current flags */
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "	/* flip the bit(s) under test */
	    "pushl %1 ; "
	    "popfl ; "		/* attempt to write them back */
	    "pushfl ; "
	    "popl %1 ; "	/* f1 = flags after the write */
	    "popfl"		/* restore original flags */
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));

	return !!((f0^f1) & mask);
}
108
/*
 * Populate the global "cpu" structure (level, model, feature words)
 * and cpu_vendor[] via the FPU probe and CPUID, when available.
 */
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		/* CPUID leaf 0: maximum basic leaf and vendor string */
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			/* Leaf 1: family/model/stepping plus feature words 0 and 4 */
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
			cpu.level = (tfms >> 8) & 15;	/* family */
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				/* family 6+: fold in the extended model bits */
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		/* Leaf 0x80000000: maximum extended leaf */
		asm("cpuid"
		    : "=a" (max_amd_level)
		    : "a" (0x80000000)
		    : "ebx", "ecx", "edx");

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			/* Leaf 0x80000001: extended feature words 6 and 1 */
			u32 eax = 0x80000001;
			asm("cpuid"
			    : "+a" (eax),
			      "=c" (cpu.flags[6]),
			      "=d" (cpu.flags[1])
			    : : "ebx");
		}
	}
}
155
156/* Returns a bitmask of which words we have error bits in */
157static int check_flags(void)
158{
159 u32 err;
160 int i;
161
162 err = 0;
163 for (i = 0; i < NCAPINTS; i++) {
164 err_flags[i] = req_flags[i] & ~cpu.flags[i];
165 if (err_flags[i])
166 err |= 1 << i;
167 }
168
169 return err;
170}
171
/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 *
 * If the only missing features are ones that specific CPUs ship with
 * disabled by default, try to enable them via vendor MSRs before
 * giving up.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

	/* The AC flag is only writable on a 486 or later */
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_flags();
	err = check_flags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax &= ~(1 << 15);	/* clear HWCR bit 15 (SSE disable) */
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_flags();	/* Make sure it really did something */
		err = check_flags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		/* FCR bits 1 and 7 expose CMPXCHG8B -- see VIA C3 docs */
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		/* CPUID still hides it, so set the bit by hand and re-verify */
		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_flags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		u32 ecx = 0x80860004;	/* Transmeta feature-mask MSR */
		u32 eax, edx;
		u32 level = 1;

		/* Save the mask, unmask everything, re-read CPUID leaf 1,
		   then restore the original mask */
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_flags();
	}

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
1// SPDX-License-Identifier: GPL-2.0-only
2/* -*- linux-c -*- ------------------------------------------------------- *
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright 2007 rPath, Inc. - All Rights Reserved
6 *
7 * ----------------------------------------------------------------------- */
8
9/*
10 * Check for obligatory CPU features and abort if the features are not
11 * present. This code should be compilable as 16-, 32- or 64-bit
12 * code, so be very careful with types and inline assembly.
13 *
14 * This code should not contain any messages; that requires an
15 * additional wrapper.
16 *
17 * As written, this code is not safe for inclusion into the kernel
18 * proper (after FPU initialization, in particular).
19 */
20
21#ifdef _SETUP
22# include "boot.h"
23#endif
24#include <linux/types.h>
25#include <asm/intel-family.h>
26#include <asm/processor-flags.h>
27#include <asm/required-features.h>
28#include <asm/msr-index.h>
29#include "string.h"
30#include "msr.h"
31
/* Required-but-missing feature bits, one word per CPUID capability word */
static u32 err_flags[NCAPINTS];

/* Minimum CPU family the kernel was configured for */
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
35
/*
 * Feature bits the kernel cannot run without, one mask per CPUID
 * capability word.  Words whose REQUIRED_MASKn is not wired up here
 * are never mandatory for boot.
 */
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
	0, /* REQUIRED_MASK8 not implemented in this file */
	0, /* REQUIRED_MASK9 not implemented in this file */
	0, /* REQUIRED_MASK10 not implemented in this file */
	0, /* REQUIRED_MASK11 not implemented in this file */
	0, /* REQUIRED_MASK12 not implemented in this file */
	0, /* REQUIRED_MASK13 not implemented in this file */
	0, /* REQUIRED_MASK14 not implemented in this file */
	0, /* REQUIRED_MASK15 not implemented in this file */
	REQUIRED_MASK16,
};
56
/* Pack four characters into a little-endian 32-bit word, matching the
   layout CPUID uses to return the vendor string. */
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
58
59static int is_amd(void)
60{
61 return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
62 cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
63 cpu_vendor[2] == A32('c', 'A', 'M', 'D');
64}
65
66static int is_centaur(void)
67{
68 return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
69 cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
70 cpu_vendor[2] == A32('a', 'u', 'l', 's');
71}
72
73static int is_transmeta(void)
74{
75 return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
76 cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
77 cpu_vendor[2] == A32('M', 'x', '8', '6');
78}
79
80static int is_intel(void)
81{
82 return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
83 cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
84 cpu_vendor[2] == A32('n', 't', 'e', 'l');
85}
86
87/* Returns a bitmask of which words we have error bits in */
88static int check_cpuflags(void)
89{
90 u32 err;
91 int i;
92
93 err = 0;
94 for (i = 0; i < NCAPINTS; i++) {
95 err_flags[i] = req_flags[i] & ~cpu.flags[i];
96 if (err_flags[i])
97 err |= 1 << i;
98 }
99
100 return err;
101}
102
/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 *
 * If the only missing features are ones that specific CPUs ship with
 * disabled by default, try to enable them via vendor MSRs (or, for
 * Pentium M PAE, a command-line override) before giving up.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof(cpu.flags));
	cpu.level = 3;

	/* The AC flag is only writable on a 486 or later */
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_cpuflags();
	err = check_cpuflags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		struct msr m;

		boot_rdmsr(MSR_K7_HWCR, &m);
		m.l &= ~(1 << 15);	/* clear HWCR bit 15 (SSE disable) */
		boot_wrmsr(MSR_K7_HWCR, &m);

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		struct msr m;

		boot_rdmsr(MSR_VIA_FCR, &m);
		/* FCR bits 1 and 7 expose CMPXCHG8B -- see VIA C3 docs */
		m.l |= (1 << 1) | (1 << 7);
		boot_wrmsr(MSR_VIA_FCR, &m);

		/* CPUID still hides it, so set the bit by hand and re-verify */
		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		struct msr m, m_tmp;
		u32 level = 1;

		/* Save the mask MSR, unmask everything, re-read CPUID
		   leaf 1, then restore the original mask */
		boot_rdmsr(0x80860004, &m);
		m_tmp = m;
		m_tmp.l = ~0;
		boot_wrmsr(0x80860004, &m_tmp);
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		boot_wrmsr(0x80860004, &m);

		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		   is_intel() && cpu.level == 6 &&
		   (cpu.model == 9 || cpu.model == 13)) {
		/* PAE is disabled on this Pentium M but can be forced */
		if (cmdline_find_option_bool("forcepae")) {
			puts("WARNING: Forcing PAE in CPU flags\n");
			set_bit(X86_FEATURE_PAE, cpu.flags);
			err = check_cpuflags();
		}
		else {
			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
		}
	}
	/* NOTE(review): check_knl_erratum() is defined later in this file;
	   assumes a prior declaration (presumably boot.h) -- confirm. */
	if (!err)
		err = check_knl_erratum();

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
198
199int check_knl_erratum(void)
200{
201 /*
202 * First check for the affected model/family:
203 */
204 if (!is_intel() ||
205 cpu.family != 6 ||
206 cpu.model != INTEL_FAM6_XEON_PHI_KNL)
207 return 0;
208
209 /*
210 * This erratum affects the Accessed/Dirty bits, and can
211 * cause stray bits to be set in !Present PTEs. We have
212 * enough bits in our 64-bit PTEs (which we have on real
213 * 64-bit mode or PAE) to avoid using these troublesome
214 * bits. But, we do not have enough space in our 32-bit
215 * PTEs. So, refuse to run on 32-bit non-PAE kernels.
216 */
217 if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
218 return 0;
219
220 puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
221 "processor due to a processor erratum. Use a 64-bit\n"
222 "kernel, or enable PAE in this 32-bit kernel.\n\n");
223
224 return -1;
225}
226
227