// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * CMCI storm detection backoff counter
 *
 * During a storm, we reset this counter to INITIAL_CHECK_INTERVAL whenever
 * we've encountered an error in the most recent poll. If not, we decrement
 * it by one. We signal the end of the CMCI storm when it reaches 0.
 */
static DEFINE_PER_CPU(int, cmci_backoff_cnt);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

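/*
 * CMCI_THRESHOLD is the corrected error count programmed into MCi_CTL2,
 * i.e. an interrupt is requested for every corrected error.
 * CMCI_STORM_INTERVAL is both the window used to count interrupts for
 * storm detection and the fast poll interval used while a storm is being
 * handled; CMCI_POLL_INTERVAL is the slower interval used once a storm
 * has subsided. More than CMCI_STORM_THRESHOLD interrupts within one
 * window are treated as a storm.
 */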
#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(HZ)
#define CMCI_STORM_THRESHOLD	15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;

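/*
 * Check whether CMCI is usable on this CPU: not disabled on the command
 * line, an Intel CPU with a local APIC providing the CMCI LVT entry, and
 * MCG_CAP advertising CMCI. Reports the number of MCA banks in *banks.
 */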
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the MCA initialization
	 * is vendor keyed and this makes sure none of the backdoors are
	 * entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

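/*
 * Check whether Local Machine Check Exception (LMCE) delivery can be
 * used: it must not be disabled on the command line, the CPU must
 * advertise it together with recovery support, and the BIOS must have
 * opted in via the feature control MSR.
 */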
static bool lmce_supported(void)
{
	u64 tmp;

	if (mca_cfg.lmce_disabled)
		return false;

	rdmsrl(MSR_IA32_MCG_CAP, tmp);

	/*
	 * LMCE depends on recovery support in the processor. Hence both
	 * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
	 */
	if ((tmp & (MCG_SER_P | MCG_LMCE_P)) !=
		   (MCG_SER_P | MCG_LMCE_P))
		return false;

	/*
	 * BIOS should indicate support for LMCE by setting bit 20 in
	 * IA32_FEATURE_CONTROL without which touching MCG_EXT_CTL will
	 * generate a #GP fault.
	 */
	rdmsrl(MSR_IA32_FEATURE_CONTROL, tmp);
	if ((tmp & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE)) ==
		    (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE))
		return true;

	return false;
}

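/*
 * Poll the banks owned by this CPU while a CMCI storm is in progress and
 * maintain the storm backoff counter. Returns false when no storm is
 * active on this CPU.
 */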
bool mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return false;

	/*
	 * Reset the counter if we've logged an error in the last poll
	 * during the storm.
	 */
	if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
		this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	else
		this_cpu_dec(cmci_backoff_cnt);

	return true;
}

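/*
 * A CPU is going away: if it was participating in a CMCI storm, drop it
 * from the global storm count and reset its storm state.
 */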
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

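/*
 * Set or clear the CMCI enable bit in MCi_CTL2 for every bank owned by
 * this CPU. Used to switch between interrupt and poll mode while
 * handling a storm.
 */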
static void cmci_toggle_interrupt_mode(bool on)
{
	unsigned long flags, *owned;
	int bank;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	owned = this_cpu_ptr(mce_banks_owned);
	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);

		if (on)
			val |= MCI_CTL2_CMCI_EN;
		else
			val &= ~MCI_CTL2_CMCI_EN;

		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

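/*
 * Called from the MCE poll timer to choose the next polling interval
 * based on this CPU's storm state. Re-enables CMCI interrupts once all
 * CPUs agree that the storm has subsided.
 */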
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:

		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");

		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_toggle_interrupt_mode(true);
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:

		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}

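/*
 * Count CMCIs arriving within a CMCI_STORM_INTERVAL window. Once more
 * than CMCI_STORM_THRESHOLD interrupts have been seen, declare a storm:
 * switch this CPU's owned banks to poll mode, kick the poll timer and
 * bump the global count of storming CPUs.
 */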
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_toggle_interrupt_mode(false);
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_STORM_INTERVAL);
	this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;

	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		/* Skip banks in firmware first mode */
		if (test_bit(i, mce_banks_ce_disabled))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			continue;
		}

		if (!mca_cfg.bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If bios_cmci_threshold boot option was specified
			 * but the threshold is zero, we'll try to initialize
			 * it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			/*
			 * We are able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
					(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same number of banks */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

void cmci_disable_bank(int bank)
{
	int banks;
	unsigned long flags;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with still disabled APIC, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

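/* Opt in to local machine check delivery if the CPU and BIOS support it. */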
static void intel_init_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);

	if (!(val & MCG_EXT_CTL_LMCE_EN))
		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}

static void intel_clear_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	val &= ~MCG_EXT_CTL_LMCE_EN;
	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}

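/*
 * Enable the Protected Processor Inventory Number (PPIN) on the models
 * known to implement MSR_PPIN_CTL. In that MSR, bit 0 is the lock-out
 * bit and bit 1 enables reads of MSR_PPIN, so a value of 1 means
 * "locked and disabled" and 2 means "enabled".
 */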
static void intel_ppin_init(struct cpuinfo_x86 *c)
{
	unsigned long long val;

	/*
	 * Even if testing the presence of the MSR would be enough, we don't
	 * want to risk the situation where other models reuse this MSR for
	 * other purposes.
	 */
	switch (c->x86_model) {
	case INTEL_FAM6_IVYBRIDGE_X:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:

		if (rdmsrl_safe(MSR_PPIN_CTL, &val))
			return;

		if ((val & 3UL) == 1UL) {
			/* PPIN available but disabled: */
			return;
		}

		/* If PPIN is disabled, but not locked, try to enable: */
		if (!(val & 3UL)) {
			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
			rdmsrl_safe(MSR_PPIN_CTL, &val);
		}

		if ((val & 3UL) == 2UL)
			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
	}
}

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
	intel_init_lmce();
	intel_ppin_init(c);
}

void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
	intel_clear_lmce();
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

/*
 * On systems that do support CMCI but it's disabled, polling for MCEs can
 * cause the same event to be reported multiple times because IA32_MCi_STATUS
 * is shared by all CPUs in the same package.
 */
static DEFINE_SPINLOCK(cmci_poll_lock);

/* Linux non-storm CMCI threshold (may be overridden by BIOS) */
#define CMCI_THRESHOLD		1

/*
 * MCi_CTL2 threshold for each bank when there is no storm.
 * Default value for each bank may have been set by BIOS.
 */
static u16 cmci_threshold[MAX_NR_BANKS];

/*
 * High threshold to limit CMCI rate during storms. Max supported is
 * 0x7FFF. Use this slightly smaller value so it has a distinctive
 * signature when someone asks "Why am I not seeing all corrected errors?"
 * A high threshold is used instead of just disabling CMCI for a
 * bank because both corrected and uncorrected errors may be logged
 * in the same bank and signalled with CMCI. The threshold only applies
 * to corrected errors, so keeping CMCI enabled means that uncorrected
 * errors will still be processed in a timely fashion.
 */
#define CMCI_STORM_THRESHOLD	32749

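/*
 * Check whether CMCI is usable on this CPU: not disabled on the command
 * line, an Intel or Zhaoxin CPU with a local APIC providing the CMCI LVT
 * entry, and MCG_CAP advertising CMCI. Reports the number of MCA banks
 * in *banks.
 */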
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the MCA initialization
	 * is vendor keyed and this makes sure none of the backdoors are
	 * entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

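/*
 * Check whether Local Machine Check Exception (LMCE) delivery can be
 * used: it must not be disabled on the command line, the CPU must
 * advertise it together with recovery support, and the BIOS must have
 * opted in via the locked IA32_FEAT_CTL MSR.
 */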
static bool lmce_supported(void)
{
	u64 tmp;

	if (mca_cfg.lmce_disabled)
		return false;

	rdmsrl(MSR_IA32_MCG_CAP, tmp);

	/*
	 * LMCE depends on recovery support in the processor. Hence both
	 * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
	 */
	if ((tmp & (MCG_SER_P | MCG_LMCE_P)) !=
		   (MCG_SER_P | MCG_LMCE_P))
		return false;

	/*
	 * BIOS should indicate support for LMCE by setting bit 20 in
	 * IA32_FEAT_CTL without which touching MCG_EXT_CTL will generate a #GP
	 * fault. The MSR must also be locked for LMCE_ENABLED to take effect.
	 * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally
	 * locks the MSR in the event that it wasn't already locked by BIOS.
	 */
	rdmsrl(MSR_IA32_FEAT_CTL, tmp);
	if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED)))
		return false;

	return tmp & FEAT_CTL_LMCE_ENABLED;
}

/*
 * Set a new CMCI threshold value. Preserve the state of the
 * MCI_CTL2_CMCI_EN bit in case this happens during a
 * cmci_rediscover() operation.
 */
static void cmci_set_threshold(int bank, int thresh)
{
	unsigned long flags;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

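/*
 * Called by the generic storm handling code when a storm starts or ends
 * on a bank: raise the bank's CMCI threshold while the storm is active
 * and restore the saved default once it subsides.
 */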
void mce_intel_handle_storm(int bank, bool on)
{
	if (on)
		cmci_set_threshold(bank, CMCI_STORM_THRESHOLD);
	else
		cmci_set_threshold(bank, cmci_threshold[bank]);
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

/*
 * Check all the reasons why the current CPU cannot claim
 * ownership of a bank:
 * 1: CPU already owns this bank
 * 2: BIOS owns this bank
 * 3: Some other CPU owns this bank
 */
static bool cmci_skip_bank(int bank, u64 *val)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);

	if (test_bit(bank, owned))
		return true;

	/* Skip banks in firmware first mode */
	if (test_bit(bank, mce_banks_ce_disabled))
		return true;

	rdmsrl(MSR_IA32_MCx_CTL2(bank), *val);

	/* Already owned by someone else? */
	if (*val & MCI_CTL2_CMCI_EN) {
		clear_bit(bank, owned);
		__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
		return true;
	}

	return false;
}

/*
 * Decide which CMCI interrupt threshold to use:
 * 1: If this bank is in storm mode from whichever CPU was
 *    the previous owner, stay in storm mode.
 * 2: If ignoring any threshold set by BIOS, set Linux default
 * 3: Try to honor BIOS threshold (unless buggy BIOS set it at zero).
 */
static u64 cmci_pick_threshold(u64 val, int *bios_zero_thresh)
{
	if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
		return val;

	if (!mca_cfg.bios_cmci_threshold) {
		val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
		val |= CMCI_THRESHOLD;
	} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
		/*
		 * If bios_cmci_threshold boot option was specified
		 * but the threshold is zero, we'll try to initialize
		 * it to 1.
		 */
		*bios_zero_thresh = 1;
		val |= CMCI_THRESHOLD;
	}

	return val;
}

/*
 * Try to claim ownership of a bank.
 */
static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_wrong_thresh)
{
	struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);

	val |= MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);

	/* If the enable bit did not stick, this bank should be polled. */
	if (!(val & MCI_CTL2_CMCI_EN)) {
		WARN_ON(!test_bit(bank, this_cpu_ptr(mce_poll_banks)));
		storm->banks[bank].poll_only = true;
		return;
	}

	/* This CPU successfully set the enable bit. */
	set_bit(bank, (void *)this_cpu_ptr(&mce_banks_owned));

	if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD) {
		pr_notice("CPU%d BANK%d CMCI inherited storm\n", smp_processor_id(), bank);
		mce_inherit_storm(bank);
		cmci_storm_begin(bank);
	} else {
		__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	}

	/*
	 * We are able to set thresholds for some banks that
	 * had a threshold of 0. This means the BIOS has not
	 * set the thresholds properly or does not work with
	 * this boot option. Note down now and report later.
	 */
	if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
	    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
		*bios_wrong_thresh = 1;

	/* Save default threshold for each bank */
	if (cmci_threshold[bank] == 0)
		cmci_threshold[bank] = val & MCI_CTL2_CMCI_THRESHOLD_MASK;
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks. Called during initial bootstrap, and also for hotplug CPU operations
 * to rediscover/reassign machine check banks.
 */
static void cmci_discover(int banks)
{
	int bios_wrong_thresh = 0;
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (cmci_skip_bank(i, &val))
			continue;

		val = cmci_pick_threshold(val, &bios_zero_thresh);
		cmci_claim_bank(i, val, bios_zero_thresh, &bios_wrong_thresh);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));

	if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
		cmci_storm_end(bank);
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same number of banks */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

void cmci_disable_bank(int bank)
{
	int banks;
	unsigned long flags;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

/* Bank polling function when CMCI is disabled. */
static void cmci_mc_poll_banks(void)
{
	spin_lock(&cmci_poll_lock);
	machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
	spin_unlock(&cmci_poll_lock);
}

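/*
 * Set up CMCI on this CPU: install the threshold interrupt handler,
 * claim banks and program the CMCI LVT entry in the local APIC. When
 * CMCI is not available, fall back to serialized polling of the banks.
 */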
void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks)) {
		mc_poll_banks = cmci_mc_poll_banks;
		return;
	}

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with still disabled APIC, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

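/* Opt in to local machine check delivery if the CPU and BIOS support it. */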
void intel_init_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);

	if (!(val & MCG_EXT_CTL_LMCE_EN))
		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}

void intel_clear_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	val &= ~MCG_EXT_CTL_LMCE_EN;
	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}

/*
 * Enable additional error logs from the integrated
 * memory controller on processors that support this.
 */
static void intel_imc_init(struct cpuinfo_x86 *c)
{
	u64 error_control;

	switch (c->x86_model) {
	case INTEL_FAM6_SANDYBRIDGE_X:
	case INTEL_FAM6_IVYBRIDGE_X:
	case INTEL_FAM6_HASWELL_X:
		if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
			return;
		error_control |= 2;
		wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
		break;
	}
}

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_cmci();
	intel_init_lmce();
	intel_imc_init(c);
}

void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
	intel_clear_lmce();
}

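/*
 * Filter out the spurious bank 0 corrected-error reports generated by
 * the errata listed below.
 */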
bool intel_filter_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* MCE errata HSD131, HSM142, HSW131, BDM48 and SKX37 */
	if ((c->x86 == 6) &&
	    ((c->x86_model == INTEL_FAM6_HASWELL) ||
	     (c->x86_model == INTEL_FAM6_HASWELL_L) ||
	     (c->x86_model == INTEL_FAM6_BROADWELL) ||
	     (c->x86_model == INTEL_FAM6_HASWELL_G) ||
	     (c->x86_model == INTEL_FAM6_SKYLAKE_X)) &&
	    (m->bank == 0) &&
	    ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005))
		return true;

	return false;
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
bool intel_mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV))
		return false;

	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return false;

	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return false;

	return true;
}