   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Machine check handler.
   4 *
   5 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
   6 * Rest from unknown author(s).
   7 * 2004 Andi Kleen. Rewrote most of it.
   8 * Copyright 2008 Intel Corporation
   9 * Author: Andi Kleen
  10 */
  11
  12#include <linux/thread_info.h>
  13#include <linux/capability.h>
  14#include <linux/miscdevice.h>
  15#include <linux/ratelimit.h>
  16#include <linux/rcupdate.h>
  17#include <linux/kobject.h>
  18#include <linux/uaccess.h>
  19#include <linux/kdebug.h>
  20#include <linux/kernel.h>
  21#include <linux/percpu.h>
  22#include <linux/string.h>
  23#include <linux/device.h>
  24#include <linux/syscore_ops.h>
  25#include <linux/delay.h>
  26#include <linux/ctype.h>
  27#include <linux/sched.h>
  28#include <linux/sysfs.h>
  29#include <linux/types.h>
  30#include <linux/slab.h>
  31#include <linux/init.h>
  32#include <linux/kmod.h>
  33#include <linux/poll.h>
  34#include <linux/nmi.h>
  35#include <linux/cpu.h>
  36#include <linux/ras.h>
  37#include <linux/smp.h>
  38#include <linux/fs.h>
  39#include <linux/mm.h>
  40#include <linux/debugfs.h>
  41#include <linux/irq_work.h>
  42#include <linux/export.h>
  43#include <linux/jump_label.h>
  44#include <linux/set_memory.h>
  45
  46#include <asm/intel-family.h>
  47#include <asm/processor.h>
  48#include <asm/traps.h>
  49#include <asm/tlbflush.h>
  50#include <asm/mce.h>
  51#include <asm/msr.h>
  52#include <asm/reboot.h>
  53
  54#include "internal.h"
  55
  56static DEFINE_MUTEX(mce_log_mutex);
  57
  58/* sysfs synchronization */
  59static DEFINE_MUTEX(mce_sysfs_mutex);
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/mce.h>
  63
  64#define SPINUNIT		100	/* 100ns */
  65
  66DEFINE_PER_CPU(unsigned, mce_exception_count);
  67
  68DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
  69
  70struct mce_bank {
  71	u64			ctl;			/* subevents to enable */
  72	bool			init;			/* initialise bank? */
  73};
  74static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
  75
  76#define ATTR_LEN               16
  77/* One object for each MCE bank, shared by all CPUs */
  78struct mce_bank_dev {
  79	struct device_attribute	attr;			/* device attribute */
  80	char			attrname[ATTR_LEN];	/* attribute name */
  81	u8			bank;			/* bank number */
  82};
  83static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
  84
  85struct mce_vendor_flags mce_flags __read_mostly;
  86
  87struct mca_config mca_cfg __read_mostly = {
  88	.bootlog  = -1,
  89	/*
  90	 * Tolerant levels:
  91	 * 0: always panic on uncorrected errors, log corrected errors
  92	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
  93	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
  94	 * 3: never panic or SIGBUS, log all errors (for testing only)
  95	 */
  96	.tolerant = 1,
  97	.monarch_timeout = -1
  98};
  99
 100static DEFINE_PER_CPU(struct mce, mces_seen);
 101static unsigned long mce_need_notify;
 102static int cpu_missing;
 103
 104/*
 105 * MCA banks polled by the periodic polling timer for corrected events.
 106 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 107 */
 108DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 109	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
 110};
 111
 112/*
 113 * MCA banks controlled through firmware first for corrected errors.
 114 * This is a global list of banks for which we won't enable CMCI and we
 115 * won't poll. Firmware controls these banks and is responsible for
 116 * reporting corrected errors through GHES. Uncorrected/recoverable
 117 * errors are still notified through a machine check.
 118 */
 119mce_banks_t mce_banks_ce_disabled;
 120
 121static struct work_struct mce_work;
 122static struct irq_work mce_irq_work;
 123
 124static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 125
 126/*
 127 * CPU/chipset specific EDAC code can register a notifier call here to print
 128 * MCE errors in a human-readable form.
 129 */
 130BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 131
 132/* Do initial initialization of a struct mce */
 133void mce_setup(struct mce *m)
 134{
 135	memset(m, 0, sizeof(struct mce));
 136	m->cpu = m->extcpu = smp_processor_id();
 137	/* need the internal __ version to avoid deadlocks */
 138	m->time = __ktime_get_real_seconds();
 139	m->cpuvendor = boot_cpu_data.x86_vendor;
 140	m->cpuid = cpuid_eax(1);
 141	m->socketid = cpu_data(m->extcpu).phys_proc_id;
 142	m->apicid = cpu_data(m->extcpu).initial_apicid;
 143	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
 144
 145	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 146		rdmsrl(MSR_PPIN, m->ppin);
 147
 148	m->microcode = boot_cpu_data.microcode;
 149}
 150
 151DEFINE_PER_CPU(struct mce, injectm);
 152EXPORT_PER_CPU_SYMBOL_GPL(injectm);
 153
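/*
 * Log a machine check record: add it to the lockless event genpool and, on
 * success, queue the irq_work that schedules notifier processing.
 */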
 154void mce_log(struct mce *m)
 155{
 156	if (!mce_gen_pool_add(m))
 157		irq_work_queue(&mce_irq_work);
 158}
 159
 160void mce_inject_log(struct mce *m)
 161{
 162	mutex_lock(&mce_log_mutex);
 163	mce_log(m);
 164	mutex_unlock(&mce_log_mutex);
 165}
 166EXPORT_SYMBOL_GPL(mce_inject_log);
 167
 168static struct notifier_block mce_srao_nb;
 169
 170/*
 171 * We run the default notifier if we have only the SRAO, the first and the
 172 * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
 173 * notifiers registered on the chain.
 174 */
 175#define NUM_DEFAULT_NOTIFIERS	3
 176static atomic_t num_notifiers;
 177
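/*
 * Register an MCE decoder on the notifier chain. Priorities strictly between
 * MCE_PRIO_MCELOG and MCE_PRIO_EDAC are rejected, and the registration count
 * is tracked so mce_default_notifier() only prints when nobody else decodes.
 */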
 178void mce_register_decode_chain(struct notifier_block *nb)
 179{
 180	if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
 181		return;
 182
 183	atomic_inc(&num_notifiers);
 184
 185	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 186}
 187EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 188
 189void mce_unregister_decode_chain(struct notifier_block *nb)
 190{
 191	atomic_dec(&num_notifiers);
 192
 193	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 194}
 195EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 196
 197static inline u32 ctl_reg(int bank)
 198{
 199	return MSR_IA32_MCx_CTL(bank);
 200}
 201
 202static inline u32 status_reg(int bank)
 203{
 204	return MSR_IA32_MCx_STATUS(bank);
 205}
 206
 207static inline u32 addr_reg(int bank)
 208{
 209	return MSR_IA32_MCx_ADDR(bank);
 210}
 211
 212static inline u32 misc_reg(int bank)
 213{
 214	return MSR_IA32_MCx_MISC(bank);
 215}
 216
 217static inline u32 smca_ctl_reg(int bank)
 218{
 219	return MSR_AMD64_SMCA_MCx_CTL(bank);
 220}
 221
 222static inline u32 smca_status_reg(int bank)
 223{
 224	return MSR_AMD64_SMCA_MCx_STATUS(bank);
 225}
 226
 227static inline u32 smca_addr_reg(int bank)
 228{
 229	return MSR_AMD64_SMCA_MCx_ADDR(bank);
 230}
 231
 232static inline u32 smca_misc_reg(int bank)
 233{
 234	return MSR_AMD64_SMCA_MCx_MISC(bank);
 235}
 236
 237struct mca_msr_regs msr_ops = {
 238	.ctl	= ctl_reg,
 239	.status	= status_reg,
 240	.addr	= addr_reg,
 241	.misc	= misc_reg
 242};
 243
 244static void __print_mce(struct mce *m)
 245{
 246	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
 247		 m->extcpu,
 248		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
 249		 m->mcgstatus, m->bank, m->status);
 250
 251	if (m->ip) {
 252		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
 253			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 254			m->cs, m->ip);
 255
 256		if (m->cs == __KERNEL_CS)
 257			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 258		pr_cont("\n");
 259	}
 260
 261	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
 262	if (m->addr)
 263		pr_cont("ADDR %llx ", m->addr);
 264	if (m->misc)
 265		pr_cont("MISC %llx ", m->misc);
 266
 267	if (mce_flags.smca) {
 268		if (m->synd)
 269			pr_cont("SYND %llx ", m->synd);
 270		if (m->ipid)
 271			pr_cont("IPID %llx ", m->ipid);
 272	}
 273
 274	pr_cont("\n");
 275	/*
 276	 * Note this output is parsed by external tools and old fields
 277	 * should not be changed.
 278	 */
 279	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 280		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
 281		m->microcode);
 282}
 283
 284static void print_mce(struct mce *m)
 285{
 286	__print_mce(m);
 287
 288	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 289		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 290}
 291
 292#define PANIC_TIMEOUT 5 /* 5 seconds */
 293
 294static atomic_t mce_panicked;
 295
 296static int fake_panic;
 297static atomic_t mce_fake_panicked;
 298
 299/* Panic in progress. Enable interrupts and wait for final IPI */
 300static void wait_for_panic(void)
 301{
 302	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
 303
 304	preempt_disable();
 305	local_irq_enable();
 306	while (timeout-- > 0)
 307		udelay(1);
 308	if (panic_timeout == 0)
 309		panic_timeout = mca_cfg.panic_timeout;
 310	panic("Panicing machine check CPU died");
 311}
 312
 313static void mce_panic(const char *msg, struct mce *final, char *exp)
 314{
 315	int apei_err = 0;
 316	struct llist_node *pending;
 317	struct mce_evt_llist *l;
 318
 319	if (!fake_panic) {
 320		/*
 321		 * Make sure only one CPU runs in machine check panic
 322		 */
 323		if (atomic_inc_return(&mce_panicked) > 1)
 324			wait_for_panic();
 325		barrier();
 326
 327		bust_spinlocks(1);
 328		console_verbose();
 329	} else {
 330		/* Don't log too much for fake panic */
 331		if (atomic_inc_return(&mce_fake_panicked) > 1)
 332			return;
 333	}
 334	pending = mce_gen_pool_prepare_records();
 335	/* First print corrected ones that are still unlogged */
 336	llist_for_each_entry(l, pending, llnode) {
 337		struct mce *m = &l->mce;
 338		if (!(m->status & MCI_STATUS_UC)) {
 339			print_mce(m);
 340			if (!apei_err)
 341				apei_err = apei_write_mce(m);
 342		}
 343	}
 344	/* Now print uncorrected but with the final one last */
 345	llist_for_each_entry(l, pending, llnode) {
 346		struct mce *m = &l->mce;
 347		if (!(m->status & MCI_STATUS_UC))
 348			continue;
 349		if (!final || mce_cmp(m, final)) {
 350			print_mce(m);
 351			if (!apei_err)
 352				apei_err = apei_write_mce(m);
 353		}
 354	}
 355	if (final) {
 356		print_mce(final);
 357		if (!apei_err)
 358			apei_err = apei_write_mce(final);
 359	}
 360	if (cpu_missing)
 361		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
 362	if (exp)
 363		pr_emerg(HW_ERR "Machine check: %s\n", exp);
 364	if (!fake_panic) {
 365		if (panic_timeout == 0)
 366			panic_timeout = mca_cfg.panic_timeout;
 367		panic(msg);
 368	} else
 369		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 370}
 371
 372/* Support code for software error injection */
 373
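/*
 * Map an MSR number to the offset of the matching field in struct mce so the
 * wrappers below can redirect accesses to the per-CPU "injectm" record while
 * an injection is in flight. Returns -1 for MSRs that are not emulated.
 */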
 374static int msr_to_offset(u32 msr)
 375{
 376	unsigned bank = __this_cpu_read(injectm.bank);
 377
 378	if (msr == mca_cfg.rip_msr)
 379		return offsetof(struct mce, ip);
 380	if (msr == msr_ops.status(bank))
 381		return offsetof(struct mce, status);
 382	if (msr == msr_ops.addr(bank))
 383		return offsetof(struct mce, addr);
 384	if (msr == msr_ops.misc(bank))
 385		return offsetof(struct mce, misc);
 386	if (msr == MSR_IA32_MCG_STATUS)
 387		return offsetof(struct mce, mcgstatus);
 388	return -1;
 389}
 390
 391/* MSR access wrappers used for error injection */
 392static u64 mce_rdmsrl(u32 msr)
 393{
 394	u64 v;
 395
 396	if (__this_cpu_read(injectm.finished)) {
 397		int offset = msr_to_offset(msr);
 398
 399		if (offset < 0)
 400			return 0;
 401		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 402	}
 403
 404	if (rdmsrl_safe(msr, &v)) {
 405		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
 406		/*
 407		 * Return zero in case the access faulted. This should
 408		 * not happen normally but can happen if the CPU does
 409		 * something weird, or if the code is buggy.
 410		 */
 411		v = 0;
 412	}
 413
 414	return v;
 415}
 416
 417static void mce_wrmsrl(u32 msr, u64 v)
 418{
 419	if (__this_cpu_read(injectm.finished)) {
 420		int offset = msr_to_offset(msr);
 421
 422		if (offset >= 0)
 423			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 424		return;
 425	}
 426	wrmsrl(msr, v);
 427}
 428
 429/*
 430 * Collect all global (w.r.t. this processor) status about this machine
 431 * check into our "mce" struct so that we can use it later to assess
 432 * the severity of the problem as we read per-bank specific details.
 433 */
 434static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
 435{
 436	mce_setup(m);
 437
 438	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
 439	if (regs) {
 440		/*
 441		 * Get the address of the instruction at the time of
 442		 * the machine check error.
 443		 */
 444		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
 445			m->ip = regs->ip;
 446			m->cs = regs->cs;
 447
 448			/*
 449			 * When in VM86 mode make the cs look like ring 3
 450			 * always. This is a lie, but it's better than passing
 451			 * the additional vm86 bit around everywhere.
 452			 */
 453			if (v8086_mode(regs))
 454				m->cs |= 3;
 455		}
 456		/* Use accurate RIP reporting if available. */
 457		if (mca_cfg.rip_msr)
 458			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
 459	}
 460}
 461
 462int mce_available(struct cpuinfo_x86 *c)
 463{
 464	if (mca_cfg.disabled)
 465		return 0;
 466	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 467}
 468
 469static void mce_schedule_work(void)
 470{
 471	if (!mce_gen_pool_empty())
 472		schedule_work(&mce_work);
 473}
 474
 475static void mce_irq_work_cb(struct irq_work *entry)
 476{
 477	mce_schedule_work();
 478}
 479
 480/*
 481 * Check if the address reported by the CPU is in a format we can parse.
 482 * It would be possible to add code for most other cases, but all would
 483 * be somewhat complicated (e.g. segment offset would require an instruction
 484 * parser). So only support physical addresses up to page granularity for now.
 485 */
 486int mce_usable_address(struct mce *m)
 487{
 488	if (!(m->status & MCI_STATUS_ADDRV))
 489		return 0;
 490
 491	/* Checks after this one are Intel-specific: */
 492	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 493		return 1;
 494
 495	if (!(m->status & MCI_STATUS_MISCV))
 496		return 0;
 497
 498	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
 499		return 0;
 500
 501	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
 502		return 0;
 503
 504	return 1;
 505}
 506EXPORT_SYMBOL_GPL(mce_usable_address);
 507
 508bool mce_is_memory_error(struct mce *m)
 509{
 510	if (m->cpuvendor == X86_VENDOR_AMD ||
 511	    m->cpuvendor == X86_VENDOR_HYGON) {
 512		return amd_mce_is_memory_error(m);
 513	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 514		/*
 515		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 516		 *
 517		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
 518		 * indicating a memory error. Bit 8 is used for indicating a
 519		 * cache hierarchy error. The combination of bit 2 and bit 3
 520		 * is used for indicating a `generic' cache hierarchy error
 521		 * But we can't just blindly check the above bits, because if
 522		 * bit 11 is set, then it is a bus/interconnect error - and
 523		 * either way the above bits just gives more detail on what
 524		 * bus/interconnect error happened. Note that bit 12 can be
 525		 * ignored, as it's the "filter" bit.
 526		 */
 527		return (m->status & 0xef80) == BIT(7) ||
 528		       (m->status & 0xef00) == BIT(8) ||
 529		       (m->status & 0xeffc) == 0xc;
 530	}
 531
 532	return false;
 533}
 534EXPORT_SYMBOL_GPL(mce_is_memory_error);
 535
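/*
 * An error counts as correctable unless it is marked uncorrected (UC) or,
 * on AMD/Hygon, flagged as a deferred error.
 */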
 536bool mce_is_correctable(struct mce *m)
 537{
 538	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 539		return false;
 540
 541	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
 542		return false;
 543
 544	if (m->status & MCI_STATUS_UC)
 545		return false;
 546
 547	return true;
 548}
 549EXPORT_SYMBOL_GPL(mce_is_correctable);
 550
 551static bool cec_add_mce(struct mce *m)
 552{
 553	if (!m)
 554		return false;
 555
 556	/* We eat only correctable DRAM errors with usable addresses. */
 557	if (mce_is_memory_error(m) &&
 558	    mce_is_correctable(m)  &&
 559	    mce_usable_address(m))
 560		if (!cec_add_elem(m->addr >> PAGE_SHIFT))
 561			return true;
 562
 563	return false;
 564}
 565
 566static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
 567			      void *data)
 568{
 569	struct mce *m = (struct mce *)data;
 570
 571	if (!m)
 572		return NOTIFY_DONE;
 573
 574	if (cec_add_mce(m))
 575		return NOTIFY_STOP;
 576
 577	/* Emit the trace record: */
 578	trace_mce_record(m);
 579
 580	set_bit(0, &mce_need_notify);
 581
 582	mce_notify_irq();
 583
 584	return NOTIFY_DONE;
 585}
 586
 587static struct notifier_block first_nb = {
 588	.notifier_call	= mce_first_notifier,
 589	.priority	= MCE_PRIO_FIRST,
 590};
 591
 592static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 593				void *data)
 594{
 595	struct mce *mce = (struct mce *)data;
 596	unsigned long pfn;
 597
 598	if (!mce)
 599		return NOTIFY_DONE;
 600
 601	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 602		pfn = mce->addr >> PAGE_SHIFT;
 603		if (!memory_failure(pfn, 0))
 604			set_mce_nospec(pfn);
 605	}
 606
 607	return NOTIFY_OK;
 608}
 609static struct notifier_block mce_srao_nb = {
 610	.notifier_call	= srao_decode_notifier,
 611	.priority	= MCE_PRIO_SRAO,
 612};
 613
 614static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 615				void *data)
 616{
 617	struct mce *m = (struct mce *)data;
 618
 619	if (!m)
 620		return NOTIFY_DONE;
 621
 622	if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
 623		return NOTIFY_DONE;
 624
 625	__print_mce(m);
 626
 627	return NOTIFY_DONE;
 628}
 629
 630static struct notifier_block mce_default_nb = {
 631	.notifier_call	= mce_default_notifier,
 632	/* lowest prio, we want it to run last. */
 633	.priority	= MCE_PRIO_LOWEST,
 634};
 635
 636/*
 637 * Read ADDR and MISC registers.
 638 */
 639static void mce_read_aux(struct mce *m, int i)
 640{
 641	if (m->status & MCI_STATUS_MISCV)
 642		m->misc = mce_rdmsrl(msr_ops.misc(i));
 643
 644	if (m->status & MCI_STATUS_ADDRV) {
 645		m->addr = mce_rdmsrl(msr_ops.addr(i));
 646
 647		/*
 648		 * Mask the reported address by the reported granularity.
 649		 */
 650		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
 651			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
 652			m->addr >>= shift;
 653			m->addr <<= shift;
 654		}
 655
 656		/*
 657		 * Extract [55:<lsb>] where lsb is the least significant
 658		 * *valid* bit of the address bits.
 659		 */
 660		if (mce_flags.smca) {
 661			u8 lsb = (m->addr >> 56) & 0x3f;
 662
 663			m->addr &= GENMASK_ULL(55, lsb);
 664		}
 665	}
 666
 667	if (mce_flags.smca) {
 668		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
 669
 670		if (m->status & MCI_STATUS_SYNDV)
 671			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
 672	}
 673}
 674
 675DEFINE_PER_CPU(unsigned, mce_poll_count);
 676
 677/*
 678 * Poll for corrected events or events that happened before reset.
 679 * Those are just logged through /dev/mcelog.
 680 *
 681 * This is executed in standard interrupt context.
 682 *
 683 * Note: the spec recommends panicking for fatal unsignalled
 684 * errors here. However, this would be quite problematic --
 685 * we would need to reimplement the Monarch handling and
 686 * it would mess up the exclusion between the exception handler
 687 * and the poll handler -- so we skip this for now.
 688 * These cases should not happen anyway, or only when the CPU
 689 * is already totally confused. In this case it's likely it will
 690 * not fully execute the machine check handler either.
 691 */
 692bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 693{
 694	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
 695	bool error_seen = false;
 696	struct mce m;
 697	int i;
 698
 699	this_cpu_inc(mce_poll_count);
 700
 701	mce_gather_info(&m, NULL);
 702
 703	if (flags & MCP_TIMESTAMP)
 704		m.tsc = rdtsc();
 705
 706	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 707		if (!mce_banks[i].ctl || !test_bit(i, *b))
 708			continue;
 709
 710		m.misc = 0;
 711		m.addr = 0;
 712		m.bank = i;
 713
 714		barrier();
 715		m.status = mce_rdmsrl(msr_ops.status(i));
 716
 717		/* If this entry is not valid, ignore it */
 718		if (!(m.status & MCI_STATUS_VAL))
 719			continue;
 720
 721		/*
 722		 * If we are logging everything (at CPU online) or this
 723		 * is a corrected error, then we must log it.
 724		 */
 725		if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
 726			goto log_it;
 727
 728		/*
 729		 * Newer Intel systems that support software error
 730		 * recovery need to make additional checks. Other
 731		 * CPUs should skip over uncorrected errors, but log
 732		 * everything else.
 733		 */
 734		if (!mca_cfg.ser) {
 735			if (m.status & MCI_STATUS_UC)
 736				continue;
 737			goto log_it;
 738		}
 739
 740		/* Log "not enabled" (speculative) errors */
 741		if (!(m.status & MCI_STATUS_EN))
 742			goto log_it;
 743
 744		/*
 745		 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
 746		 * UC == 1 && PCC == 0 && S == 0
 747		 */
 748		if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
 749			goto log_it;
 750
 751		/*
 752		 * Skip anything else. Presumption is that our read of this
 753		 * bank is racing with a machine check. Leave the log alone
 754		 * for do_machine_check() to deal with it.
 755		 */
 756		continue;
 757
 758log_it:
 759		error_seen = true;
 760
 761		mce_read_aux(&m, i);
 762
 763		m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
 764
 765		/*
 766		 * Don't get the IP here because it's unlikely to
 767		 * have anything to do with the actual error location.
 768		 */
 769		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
 770			mce_log(&m);
 771		else if (mce_usable_address(&m)) {
 772			/*
 773			 * Although we skipped logging this, we still want
 774			 * to take action. Add to the pool so the registered
 775			 * notifiers will see it.
 776			 */
 777			if (!mce_gen_pool_add(&m))
 778				mce_schedule_work();
 779		}
 780
 781		/*
 782		 * Clear state for this bank.
 783		 */
 784		mce_wrmsrl(msr_ops.status(i), 0);
 785	}
 786
 787	/*
 788	 * Don't clear MCG_STATUS here because it's only defined for
 789	 * exceptions.
 790	 */
 791
 792	sync_core();
 793
 794	return error_seen;
 795}
 796EXPORT_SYMBOL_GPL(machine_check_poll);
 797
 798/*
 799 * Do a quick check if any of the events requires a panic.
 800 * This decides if we keep the events around or clear them.
 801 */
 802static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 803			  struct pt_regs *regs)
 804{
 805	char *tmp;
 806	int i;
 807
 808	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 809		m->status = mce_rdmsrl(msr_ops.status(i));
 810		if (!(m->status & MCI_STATUS_VAL))
 811			continue;
 812
 813		__set_bit(i, validp);
 814		if (quirk_no_way_out)
 815			quirk_no_way_out(i, m, regs);
 816
 817		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
 818			m->bank = i;
 819			mce_read_aux(m, i);
 820			*msg = tmp;
 821			return 1;
 822		}
 823	}
 824	return 0;
 825}
 826
 827/*
 828 * Variable to establish order between CPUs while scanning.
 829 * Each CPU spins initially until executing is equal its number.
 830 */
 831static atomic_t mce_executing;
 832
 833/*
 834 * Defines order of CPUs on entry. First CPU becomes Monarch.
 835 */
 836static atomic_t mce_callin;
 837
 838/*
 839 * Check if a timeout waiting for other CPUs happened.
 840 */
 841static int mce_timed_out(u64 *t, const char *msg)
 842{
 843	/*
 844	 * The others already did panic for some reason.
 845	 * Bail out like in a timeout.
 846	 * rmb() to tell the compiler that system_state
 847	 * might have been modified by someone else.
 848	 */
 849	rmb();
 850	if (atomic_read(&mce_panicked))
 851		wait_for_panic();
 852	if (!mca_cfg.monarch_timeout)
 853		goto out;
 854	if ((s64)*t < SPINUNIT) {
 855		if (mca_cfg.tolerant <= 1)
 856			mce_panic(msg, NULL, NULL);
 857		cpu_missing = 1;
 858		return 1;
 859	}
 860	*t -= SPINUNIT;
 861out:
 862	touch_nmi_watchdog();
 863	return 0;
 864}
 865
 866/*
 867 * The Monarch's reign.  The Monarch is the CPU that entered
 868 * the machine check handler first. It waits for the others to
 869 * raise the exception too and then grades them. If any error
 870 * is fatal it panics; only then does it let the others continue.
 871 *
 872 * The other CPUs entering the MCE handler will be controlled by the
 873 * Monarch. They are called Subjects.
 874 *
 875 * This way we prevent any potential data corruption in an unrecoverable
 876 * case and also make sure that the errors of all CPUs are examined.
 877 *
 878 * This also detects the case of a machine check event coming from outer
 879 * space (not detected by any CPU). In this case some external agent wants
 880 * us to shut down, so panic too.
 881 *
 882 * The other CPUs might still decide to panic if the handler happens
 883 * in an unrecoverable place, but in this case the system is in a semi-stable
 884 * state and won't corrupt anything by itself. It's OK to let the others
 885 * continue for a bit first.
 886 *
 887 * All the spin loops have timeouts; when a timeout happens a CPU
 888 * typically elects itself to be Monarch.
 889 */
 890static void mce_reign(void)
 891{
 892	int cpu;
 893	struct mce *m = NULL;
 894	int global_worst = 0;
 895	char *msg = NULL;
 896	char *nmsg = NULL;
 897
 898	/*
 899	 * This CPU is the Monarch and the other CPUs have run
 900	 * through their handlers.
 901	 * Grade the severity of the errors of all the CPUs.
 902	 */
 903	for_each_possible_cpu(cpu) {
 904		int severity = mce_severity(&per_cpu(mces_seen, cpu),
 905					    mca_cfg.tolerant,
 906					    &nmsg, true);
 907		if (severity > global_worst) {
 908			msg = nmsg;
 909			global_worst = severity;
 910			m = &per_cpu(mces_seen, cpu);
 911		}
 912	}
 913
 914	/*
 915	 * Cannot recover? Panic here then.
 916	 * This dumps all the mces in the log buffer and stops the
 917	 * other CPUs.
 918	 */
 919	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
 920		mce_panic("Fatal machine check", m, msg);
 921
 922	/*
 923	 * For UC somewhere we let the CPU who detects it handle it.
 924	 * Also must let continue the others, otherwise the handling
 925	 * CPU could deadlock on a lock.
 926	 */
 927
 928	/*
 929	 * No machine check event found. Must be some external
 930	 * source or one CPU is hung. Panic.
 931	 */
 932	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
 933		mce_panic("Fatal machine check from unknown source", NULL, NULL);
 934
 935	/*
 936	 * Now clear all the mces_seen so that they don't reappear on
 937	 * the next mce.
 938	 */
 939	for_each_possible_cpu(cpu)
 940		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
 941}
 942
 943static atomic_t global_nwo;
 944
 945/*
 946 * Start of Monarch synchronization. This waits until all CPUs have
 947 * entered the exception handler and then determines if any of them
 948 * saw a fatal event that requires a panic. Then it lets the CPUs run
 949 * their per-bank scanning loops, one at a time, in the callin order.
 950 * TBD double check parallel CPU hotunplug
 951 */
 952static int mce_start(int *no_way_out)
 953{
 954	int order;
 955	int cpus = num_online_cpus();
 956	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
 957
 958	if (!timeout)
 959		return -1;
 960
 961	atomic_add(*no_way_out, &global_nwo);
 962	/*
 963	 * Rely on the implied barrier below, such that global_nwo
 964	 * is updated before mce_callin.
 965	 */
 966	order = atomic_inc_return(&mce_callin);
 967
 968	/*
 969	 * Wait for everyone.
 970	 */
 971	while (atomic_read(&mce_callin) != cpus) {
 972		if (mce_timed_out(&timeout,
 973				  "Timeout: Not all CPUs entered broadcast exception handler")) {
 974			atomic_set(&global_nwo, 0);
 975			return -1;
 976		}
 977		ndelay(SPINUNIT);
 978	}
 979
 980	/*
 981	 * mce_callin should be read before global_nwo
 982	 */
 983	smp_rmb();
 984
 985	if (order == 1) {
 986		/*
 987		 * Monarch: Starts executing now, the others wait.
 988		 */
 989		atomic_set(&mce_executing, 1);
 990	} else {
 991		/*
 992		 * Subject: Now start the scanning loop one by one in
 993		 * the original callin order.
 994		 * This way, any shared banks are seen by only one CPU
 995		 * before being cleared, avoiding duplicate reports.
 996		 */
 997		while (atomic_read(&mce_executing) < order) {
 998			if (mce_timed_out(&timeout,
 999					  "Timeout: Subject CPUs unable to finish machine check processing")) {
1000				atomic_set(&global_nwo, 0);
1001				return -1;
1002			}
1003			ndelay(SPINUNIT);
1004		}
1005	}
1006
1007	/*
1008	 * Cache the global no_way_out state.
1009	 */
1010	*no_way_out = atomic_read(&global_nwo);
1011
1012	return order;
1013}
1014
1015/*
1016 * Synchronize between CPUs after main scanning loop.
1017 * This invokes the bulk of the Monarch processing.
1018 */
1019static int mce_end(int order)
1020{
1021	int ret = -1;
1022	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1023
1024	if (!timeout)
1025		goto reset;
1026	if (order < 0)
1027		goto reset;
1028
1029	/*
1030	 * Allow others to run.
1031	 */
1032	atomic_inc(&mce_executing);
1033
1034	if (order == 1) {
1035		/* CHECKME: Can this race with a parallel hotplug? */
1036		int cpus = num_online_cpus();
1037
1038		/*
1039		 * Monarch: Wait for everyone to go through their scanning
1040		 * loops.
1041		 */
1042		while (atomic_read(&mce_executing) <= cpus) {
1043			if (mce_timed_out(&timeout,
1044					  "Timeout: Monarch CPU unable to finish machine check processing"))
1045				goto reset;
1046			ndelay(SPINUNIT);
1047		}
1048
1049		mce_reign();
1050		barrier();
1051		ret = 0;
1052	} else {
1053		/*
1054		 * Subject: Wait for Monarch to finish.
1055		 */
1056		while (atomic_read(&mce_executing) != 0) {
1057			if (mce_timed_out(&timeout,
1058					  "Timeout: Monarch CPU did not finish machine check processing"))
1059				goto reset;
1060			ndelay(SPINUNIT);
1061		}
1062
1063		/*
1064		 * Don't reset anything. That's done by the Monarch.
1065		 */
1066		return 0;
1067	}
1068
1069	/*
1070	 * Reset all global state.
1071	 */
1072reset:
1073	atomic_set(&global_nwo, 0);
1074	atomic_set(&mce_callin, 0);
1075	barrier();
1076
1077	/*
1078	 * Let others run again.
1079	 */
1080	atomic_set(&mce_executing, 0);
1081	return ret;
1082}
1083
1084static void mce_clear_state(unsigned long *toclear)
1085{
1086	int i;
1087
1088	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1089		if (test_bit(i, toclear))
1090			mce_wrmsrl(msr_ops.status(i), 0);
1091	}
1092}
1093
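/*
 * Hand a recoverable user-space memory error to memory_failure() so the
 * affected page gets poisoned/unmapped. Without a valid return IP
 * (!MCG_STATUS_RIPV) the task cannot be resumed, so MF_MUST_KILL is added.
 */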
1094static int do_memory_failure(struct mce *m)
1095{
1096	int flags = MF_ACTION_REQUIRED;
1097	int ret;
1098
1099	pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
1100	if (!(m->mcgstatus & MCG_STATUS_RIPV))
1101		flags |= MF_MUST_KILL;
1102	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
1103	if (ret)
1104		pr_err("Memory error not recovered");
1105	else
1106		set_mce_nospec(m->addr >> PAGE_SHIFT);
1107	return ret;
1108}
1109
1110
1111/*
1112 * Cases where we avoid rendezvous handler timeout:
1113 * 1) If this CPU is offline.
1114 *
1115 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1116 *  skip those CPUs which remain looping in the 1st kernel - see
1117 *  crash_nmi_callback().
1118 *
1119 * Note: there still is a small window between kexec-ing and the new,
1120 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1121 * might not get handled properly.
1122 */
1123static bool __mc_check_crashing_cpu(int cpu)
1124{
1125	if (cpu_is_offline(cpu) ||
1126	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
1127		u64 mcgstatus;
1128
1129		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
1130		if (mcgstatus & MCG_STATUS_RIPV) {
1131			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1132			return true;
1133		}
1134	}
1135	return false;
1136}
1137
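/*
 * Read and grade every valid bank on this CPU: log the events that matter,
 * record which banks need clearing in @toclear, and track the worst severity
 * seen in @final/@worst for the caller to act on.
 */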
1138static void __mc_scan_banks(struct mce *m, struct mce *final,
1139			    unsigned long *toclear, unsigned long *valid_banks,
1140			    int no_way_out, int *worst)
1141{
1142	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1143	struct mca_config *cfg = &mca_cfg;
1144	int severity, i;
1145
1146	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1147		__clear_bit(i, toclear);
1148		if (!test_bit(i, valid_banks))
1149			continue;
1150
1151		if (!mce_banks[i].ctl)
1152			continue;
1153
1154		m->misc = 0;
1155		m->addr = 0;
1156		m->bank = i;
1157
1158		m->status = mce_rdmsrl(msr_ops.status(i));
1159		if (!(m->status & MCI_STATUS_VAL))
1160			continue;
1161
1162		/*
1163		 * Corrected or non-signaled errors are handled by
1164		 * machine_check_poll(). Leave them alone, unless this panics.
1165		 */
1166		if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1167			!no_way_out)
1168			continue;
1169
1170		/* Set taint even when machine check was not enabled. */
1171		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1172
1173		severity = mce_severity(m, cfg->tolerant, NULL, true);
1174
1175		/*
1176		 * When machine check was for corrected/deferred handler don't
1177		 * touch, unless we're panicking.
1178		 */
1179		if ((severity == MCE_KEEP_SEVERITY ||
1180		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
1181			continue;
1182
1183		__set_bit(i, toclear);
1184
1185		/* Machine check event was not enabled. Clear, but ignore. */
1186		if (severity == MCE_NO_SEVERITY)
1187			continue;
1188
1189		mce_read_aux(m, i);
1190
1191		/* assuming valid severity level != 0 */
1192		m->severity = severity;
1193
1194		mce_log(m);
1195
1196		if (severity > *worst) {
1197			*final = *m;
1198			*worst = severity;
1199		}
1200	}
1201
1202	/* mce_clear_state will clear *final, save locally for use later */
1203	*m = *final;
1204}
1205
1206/*
1207 * The actual machine check handler. This only handles real
1208 * exceptions when something got corrupted coming in through int 18.
1209 *
1210 * This is executed in NMI context not subject to normal locking rules. This
1211 * implies that most kernel services cannot be safely used. Don't even
1212 * think about putting a printk in there!
1213 *
1214 * On Intel systems this is entered on all CPUs in parallel through
1215 * MCE broadcast. However some CPUs might be broken beyond repair,
1216 * so be always careful when synchronizing with others.
1217 */
1218void do_machine_check(struct pt_regs *regs, long error_code)
1219{
1220	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1221	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1222	struct mca_config *cfg = &mca_cfg;
1223	int cpu = smp_processor_id();
1224	char *msg = "Unknown";
1225	struct mce m, *final;
1226	int worst = 0;
1227
1228	/*
1229	 * Establish sequential order between the CPUs entering the machine
1230	 * check handler.
1231	 */
1232	int order = -1;
1233
1234	/*
1235	 * If no_way_out gets set, there is no safe way to recover from this
1236	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1237	 */
1238	int no_way_out = 0;
1239
1240	/*
1241	 * If kill_it gets set, there might be a way to recover from this
1242	 * error.
1243	 */
1244	int kill_it = 0;
1245
1246	/*
1247	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
1248	 * on Intel.
1249	 */
1250	int lmce = 1;
1251
1252	if (__mc_check_crashing_cpu(cpu))
1253		return;
1254
1255	ist_enter(regs);
1256
1257	this_cpu_inc(mce_exception_count);
1258
1259	mce_gather_info(&m, regs);
1260	m.tsc = rdtsc();
1261
1262	final = this_cpu_ptr(&mces_seen);
1263	*final = m;
1264
1265	memset(valid_banks, 0, sizeof(valid_banks));
1266	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1267
1268	barrier();
1269
1270	/*
1271	 * When there is no restart IP we might need to kill or panic.
1272	 * Assume the worst for now, but if we find the
1273	 * severity is MCE_AR_SEVERITY we have other options.
1274	 */
1275	if (!(m.mcgstatus & MCG_STATUS_RIPV))
1276		kill_it = 1;
1277
1278	/*
1279	 * Check if this MCE is signaled to only this logical processor,
1280	 * on Intel only.
1281	 */
1282	if (m.cpuvendor == X86_VENDOR_INTEL)
1283		lmce = m.mcgstatus & MCG_STATUS_LMCES;
1284
1285	/*
1286	 * Local machine check may already know that we have to panic.
1287	 * Broadcast machine check begins rendezvous in mce_start()
1288	 * Go through all banks in exclusion of the other CPUs. This way we
1289	 * don't report duplicated events on shared banks because the first one
1290	 * to see it will clear it.
1291	 */
1292	if (lmce) {
1293		if (no_way_out)
1294			mce_panic("Fatal local machine check", &m, msg);
1295	} else {
1296		order = mce_start(&no_way_out);
1297	}
1298
1299	__mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
1300
1301	if (!no_way_out)
1302		mce_clear_state(toclear);
1303
1304	/*
1305	 * Do most of the synchronization with other CPUs.
1306	 * When there's any problem use only local no_way_out state.
1307	 */
1308	if (!lmce) {
1309		if (mce_end(order) < 0)
1310			no_way_out = worst >= MCE_PANIC_SEVERITY;
1311	} else {
1312		/*
1313		 * If there was a fatal machine check we should have
1314		 * already called mce_panic earlier in this function.
1315		 * Since we re-read the banks, we might have found
1316		 * something new. Check again to see if we found a
1317		 * fatal error. We call "mce_severity()" again to
1318		 * make sure we have the right "msg".
1319		 */
1320		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1321			mce_severity(&m, cfg->tolerant, &msg, true);
1322			mce_panic("Local fatal machine check!", &m, msg);
1323		}
1324	}
1325
1326	/*
1327	 * If tolerant is at an insane level we drop requests to kill
1328	 * processes and continue even when there is no way out.
1329	 */
1330	if (cfg->tolerant == 3)
1331		kill_it = 0;
1332	else if (no_way_out)
1333		mce_panic("Fatal machine check on current CPU", &m, msg);
1334
1335	if (worst > 0)
1336		irq_work_queue(&mce_irq_work);
1337
1338	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1339
1340	sync_core();
1341
1342	if (worst != MCE_AR_SEVERITY && !kill_it)
1343		goto out_ist;
1344
1345	/* Fault was in user mode and we need to take some action */
1346	if ((m.cs & 3) == 3) {
1347		ist_begin_non_atomic(regs);
1348		local_irq_enable();
1349
1350		if (kill_it || do_memory_failure(&m))
1351			force_sig(SIGBUS);
1352		local_irq_disable();
1353		ist_end_non_atomic();
1354	} else {
1355		if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
1356			mce_panic("Failed kernel mode recovery", &m, NULL);
1357	}
1358
1359out_ist:
1360	ist_exit(regs);
1361}
1362EXPORT_SYMBOL_GPL(do_machine_check);
1363
1364#ifndef CONFIG_MEMORY_FAILURE
1365int memory_failure(unsigned long pfn, int flags)
1366{
1367	/* mce_severity() should not hand us an ACTION_REQUIRED error */
1368	BUG_ON(flags & MF_ACTION_REQUIRED);
1369	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1370	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1371	       pfn);
1372
1373	return 0;
1374}
1375#endif
1376
1377/*
1378 * Periodic polling timer for "silent" machine check errors.  If the
1379 * poller finds an MCE, poll 2x faster.  When the poller finds no more
1380 * errors, poll 2x slower (up to check_interval seconds).
1381 */
1382static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1383
1384static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1385static DEFINE_PER_CPU(struct timer_list, mce_timer);
1386
1387static unsigned long mce_adjust_timer_default(unsigned long interval)
1388{
1389	return interval;
1390}
1391
1392static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1393
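/*
 * (Re)arm the per-CPU polling timer so it fires no later than @interval
 * jiffies from now; an already-pending earlier expiry is left alone.
 */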
1394static void __start_timer(struct timer_list *t, unsigned long interval)
1395{
1396	unsigned long when = jiffies + interval;
1397	unsigned long flags;
1398
1399	local_irq_save(flags);
1400
1401	if (!timer_pending(t) || time_before(when, t->expires))
1402		mod_timer(t, round_jiffies(when));
1403
1404	local_irq_restore(flags);
1405}
1406
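/*
 * Periodic poll timer callback: poll the mce_poll_banks set for corrected
 * errors and adapt the interval -- halve it when events were pending,
 * otherwise back off, up to check_interval seconds.
 */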
1407static void mce_timer_fn(struct timer_list *t)
1408{
1409	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1410	unsigned long iv;
1411
1412	WARN_ON(cpu_t != t);
1413
1414	iv = __this_cpu_read(mce_next_interval);
1415
1416	if (mce_available(this_cpu_ptr(&cpu_info))) {
1417		machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1418
1419		if (mce_intel_cmci_poll()) {
1420			iv = mce_adjust_timer(iv);
1421			goto done;
1422		}
1423	}
1424
1425	/*
1426	 * Alert userspace if needed. If we logged an MCE, reduce the polling
1427	 * interval, otherwise increase the polling interval.
1428	 */
1429	if (mce_notify_irq())
1430		iv = max(iv / 2, (unsigned long) HZ/100);
1431	else
1432		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1433
1434done:
1435	__this_cpu_write(mce_next_interval, iv);
1436	__start_timer(t, iv);
1437}
1438
1439/*
1440 * Ensure that the timer is firing in @interval from now.
1441 */
1442void mce_timer_kick(unsigned long interval)
1443{
1444	struct timer_list *t = this_cpu_ptr(&mce_timer);
1445	unsigned long iv = __this_cpu_read(mce_next_interval);
1446
1447	__start_timer(t, interval);
1448
1449	if (interval < iv)
1450		__this_cpu_write(mce_next_interval, interval);
1451}
1452
1453/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1454static void mce_timer_delete_all(void)
1455{
1456	int cpu;
1457
1458	for_each_online_cpu(cpu)
1459		del_timer_sync(&per_cpu(mce_timer, cpu));
1460}
1461
1462/*
1463 * Notify the user(s) about new machine check events.
1464 * Can be called from interrupt context, but not from machine check/NMI
1465 * context.
1466 */
1467int mce_notify_irq(void)
1468{
1469	/* Not more than two messages every minute */
1470	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1471
1472	if (test_and_clear_bit(0, &mce_need_notify)) {
1473		mce_work_trigger();
1474
1475		if (__ratelimit(&ratelimit))
1476			pr_info(HW_ERR "Machine check events logged\n");
1477
1478		return 1;
1479	}
1480	return 0;
1481}
1482EXPORT_SYMBOL_GPL(mce_notify_irq);
1483
1484static void __mcheck_cpu_mce_banks_init(void)
1485{
1486	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1487	u8 n_banks = this_cpu_read(mce_num_banks);
1488	int i;
1489
1490	for (i = 0; i < n_banks; i++) {
1491		struct mce_bank *b = &mce_banks[i];
1492
1493		/*
1494		 * Init them all, __mcheck_cpu_apply_quirks() is going to apply
1495		 * the required vendor quirks before
1496		 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1497		 */
1498		b->ctl = -1ULL;
1499		b->init = 1;
1500	}
1501}
1502
1503/*
1504 * Initialize Machine Checks for a CPU.
1505 */
1506static void __mcheck_cpu_cap_init(void)
1507{
1508	u64 cap;
1509	u8 b;
1510
1511	rdmsrl(MSR_IA32_MCG_CAP, cap);
1512
1513	b = cap & MCG_BANKCNT_MASK;
1514
1515	if (b > MAX_NR_BANKS) {
1516		pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1517			smp_processor_id(), MAX_NR_BANKS, b);
1518		b = MAX_NR_BANKS;
1519	}
1520
1521	this_cpu_write(mce_num_banks, b);
1522
1523	__mcheck_cpu_mce_banks_init();
1524
1525	/* Use accurate RIP reporting if available. */
1526	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1527		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1528
1529	if (cap & MCG_SER_P)
1530		mca_cfg.ser = 1;
1531}
1532
1533static void __mcheck_cpu_init_generic(void)
1534{
1535	enum mcp_flags m_fl = 0;
1536	mce_banks_t all_banks;
1537	u64 cap;
1538
1539	if (!mca_cfg.bootlog)
1540		m_fl = MCP_DONTLOG;
1541
1542	/*
1543	 * Log the machine checks left over from the previous reset.
1544	 */
1545	bitmap_fill(all_banks, MAX_NR_BANKS);
1546	machine_check_poll(MCP_UC | m_fl, &all_banks);
1547
1548	cr4_set_bits(X86_CR4_MCE);
1549
1550	rdmsrl(MSR_IA32_MCG_CAP, cap);
1551	if (cap & MCG_CTL_P)
1552		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1553}
1554
1555static void __mcheck_cpu_init_clear_banks(void)
1556{
1557	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1558	int i;
1559
1560	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1561		struct mce_bank *b = &mce_banks[i];
1562
1563		if (!b->init)
1564			continue;
1565		wrmsrl(msr_ops.ctl(i), b->ctl);
1566		wrmsrl(msr_ops.status(i), 0);
1567	}
1568}
1569
1570/*
1571 * Do a final check to see if there are any unused/RAZ banks.
1572 *
1573 * This must be done after the banks have been initialized and any quirks have
1574 * been applied.
1575 *
1576 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1577 * Otherwise, a user who disables a bank will not be able to re-enable it
1578 * without a system reboot.
1579 */
1580static void __mcheck_cpu_check_banks(void)
1581{
1582	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1583	u64 msrval;
1584	int i;
1585
1586	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1587		struct mce_bank *b = &mce_banks[i];
1588
1589		if (!b->init)
1590			continue;
1591
1592		rdmsrl(msr_ops.ctl(i), msrval);
1593		b->init = !!msrval;
1594	}
1595}
1596
1597/*
1598 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1599 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1600 * Vol 3B Table 15-20). But this confuses both the code that determines
1601 * whether the machine check occurred in kernel or user mode, and also
1602 * the severity assessment code. Pretend that EIPV was set, and take the
1603 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1604 */
1605static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1606{
1607	if (bank != 0)
1608		return;
1609	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1610		return;
1611	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1612		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1613			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1614			  MCACOD)) !=
1615			 (MCI_STATUS_UC|MCI_STATUS_EN|
1616			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1617			  MCI_STATUS_AR|MCACOD_INSTR))
1618		return;
1619
1620	m->mcgstatus |= MCG_STATUS_EIPV;
1621	m->ip = regs->ip;
1622	m->cs = regs->cs;
1623}
1624
1625/* Add per CPU specific workarounds here */
1626static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1627{
1628	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1629	struct mca_config *cfg = &mca_cfg;
1630
1631	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1632		pr_info("unknown CPU type - not enabling MCE support\n");
1633		return -EOPNOTSUPP;
1634	}
1635
1636	/* This should be disabled by the BIOS, but isn't always */
1637	if (c->x86_vendor == X86_VENDOR_AMD) {
1638		if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1639			/*
1640			 * disable GART TBL walk error reporting, which
1641			 * trips off incorrectly with the IOMMU & 3ware
1642			 * & Cerberus:
1643			 */
1644			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1645		}
1646		if (c->x86 < 0x11 && cfg->bootlog < 0) {
1647			/*
1648			 * Lots of broken BIOS around that don't clear them
1649			 * by default and leave crap in there. Don't log:
1650			 */
1651			cfg->bootlog = 0;
1652		}
1653		/*
1654		 * Various K7s with broken bank 0 around. Always disable
1655		 * by default.
1656		 */
1657		if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1658			mce_banks[0].ctl = 0;
1659
1660		/*
1661		 * overflow_recov is supported for F15h Models 00h-0fh
1662		 * even though we don't have a CPUID bit for it.
1663		 */
1664		if (c->x86 == 0x15 && c->x86_model <= 0xf)
1665			mce_flags.overflow_recov = 1;
1666
1667	}
1668
1669	if (c->x86_vendor == X86_VENDOR_INTEL) {
1670		/*
1671		 * SDM documents that on family 6 bank 0 should not be written
1672		 * because it aliases to another special BIOS controlled
1673		 * register.
1674		 * But it's not aliased anymore on model 0x1a+
1675		 * Don't ignore bank 0 completely because there could be a
1676		 * valid event later, merely don't write CTL0.
1677		 */
1678
1679		if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1680			mce_banks[0].init = 0;
1681
1682		/*
1683		 * All newer Intel systems support MCE broadcasting. Enable
1684		 * synchronization with a one second timeout.
1685		 */
1686		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1687			cfg->monarch_timeout < 0)
1688			cfg->monarch_timeout = USEC_PER_SEC;
1689
1690		/*
1691		 * There are also broken BIOSes on some Pentium M and
1692		 * earlier systems:
1693		 */
1694		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1695			cfg->bootlog = 0;
1696
1697		if (c->x86 == 6 && c->x86_model == 45)
1698			quirk_no_way_out = quirk_sandybridge_ifu;
1699	}
1700	if (cfg->monarch_timeout < 0)
1701		cfg->monarch_timeout = 0;
1702	if (cfg->bootlog != 0)
1703		cfg->panic_timeout = 30;
1704
1705	return 0;
1706}
1707
1708static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1709{
1710	if (c->x86 != 5)
1711		return 0;
1712
1713	switch (c->x86_vendor) {
1714	case X86_VENDOR_INTEL:
1715		intel_p5_mcheck_init(c);
1716		return 1;
1717		break;
1718	case X86_VENDOR_CENTAUR:
1719		winchip_mcheck_init(c);
1720		return 1;
1721		break;
1722	default:
1723		return 0;
1724	}
1725
1726	return 0;
1727}
1728
1729/*
1730 * Init basic CPU features needed for early decoding of MCEs.
1731 */
1732static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1733{
1734	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1735		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1736		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
1737		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
1738
1739		if (mce_flags.smca) {
1740			msr_ops.ctl	= smca_ctl_reg;
1741			msr_ops.status	= smca_status_reg;
1742			msr_ops.addr	= smca_addr_reg;
1743			msr_ops.misc	= smca_misc_reg;
1744		}
1745	}
1746}
1747
1748static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1749{
1750	struct mca_config *cfg = &mca_cfg;
1751
1752	 /*
1753	  * All newer Centaur CPUs support MCE broadcasting. Enable
1754	  * synchronization with a one second timeout.
1755	  */
1756	if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
1757	     c->x86 > 6) {
1758		if (cfg->monarch_timeout < 0)
1759			cfg->monarch_timeout = USEC_PER_SEC;
1760	}
1761}
1762
1763static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1764{
1765	switch (c->x86_vendor) {
1766	case X86_VENDOR_INTEL:
1767		mce_intel_feature_init(c);
1768		mce_adjust_timer = cmci_intel_adjust_timer;
1769		break;
1770
1771	case X86_VENDOR_AMD: {
1772		mce_amd_feature_init(c);
1773		break;
1774		}
1775
1776	case X86_VENDOR_HYGON:
1777		mce_hygon_feature_init(c);
1778		break;
1779
1780	case X86_VENDOR_CENTAUR:
1781		mce_centaur_feature_init(c);
1782		break;
1783
1784	default:
1785		break;
1786	}
1787}
1788
1789static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1790{
1791	switch (c->x86_vendor) {
1792	case X86_VENDOR_INTEL:
1793		mce_intel_feature_clear(c);
1794		break;
1795	default:
1796		break;
1797	}
1798}
1799
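/*
 * Arm the per-CPU poll timer for its first interval, unless corrected-error
 * polling is disabled (mce=ignore_ce or check_interval set to 0).
 */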
1800static void mce_start_timer(struct timer_list *t)
1801{
1802	unsigned long iv = check_interval * HZ;
1803
1804	if (mca_cfg.ignore_ce || !iv)
1805		return;
1806
1807	this_cpu_write(mce_next_interval, iv);
1808	__start_timer(t, iv);
1809}
1810
1811static void __mcheck_cpu_setup_timer(void)
1812{
1813	struct timer_list *t = this_cpu_ptr(&mce_timer);
1814
1815	timer_setup(t, mce_timer_fn, TIMER_PINNED);
1816}
1817
1818static void __mcheck_cpu_init_timer(void)
1819{
1820	struct timer_list *t = this_cpu_ptr(&mce_timer);
1821
1822	timer_setup(t, mce_timer_fn, TIMER_PINNED);
1823	mce_start_timer(t);
1824}
1825
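/*
 * Give the vendor code a chance to discard uninteresting records before
 * they are logged; only AMD implements a filter here.
 */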
1826bool filter_mce(struct mce *m)
1827{
1828	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1829		return amd_filter_mce(m);
1830
1831	return false;
1832}
1833
1834/* Handle unconfigured int18 (should never happen) */
1835static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1836{
1837	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1838	       smp_processor_id());
1839}
1840
1841/* Call the installed machine check handler for this CPU setup. */
1842void (*machine_check_vector)(struct pt_regs *, long error_code) =
1843						unexpected_machine_check;
1844
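/*
 * #MC entry point from the IDT stub: dispatch to whichever handler is
 * currently installed (do_machine_check() once mcheck_cpu_init() has run).
 */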
1845dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
1846{
1847	machine_check_vector(regs, error_code);
1848}
1849
1850/*
1851 * Called for each booted CPU to set up machine checks.
1852 * Must be called with preempt off:
1853 */
1854void mcheck_cpu_init(struct cpuinfo_x86 *c)
1855{
1856	if (mca_cfg.disabled)
1857		return;
1858
1859	if (__mcheck_cpu_ancient_init(c))
1860		return;
1861
1862	if (!mce_available(c))
1863		return;
1864
1865	__mcheck_cpu_cap_init();
1866
1867	if (__mcheck_cpu_apply_quirks(c) < 0) {
1868		mca_cfg.disabled = 1;
1869		return;
1870	}
1871
1872	if (mce_gen_pool_init()) {
1873		mca_cfg.disabled = 1;
1874		pr_emerg("Couldn't allocate MCE records pool!\n");
1875		return;
1876	}
1877
1878	machine_check_vector = do_machine_check;
1879
1880	__mcheck_cpu_init_early(c);
1881	__mcheck_cpu_init_generic();
1882	__mcheck_cpu_init_vendor(c);
1883	__mcheck_cpu_init_clear_banks();
1884	__mcheck_cpu_check_banks();
1885	__mcheck_cpu_setup_timer();
1886}
1887
1888/*
1889 * Called for each booted CPU to clear some machine check opt-ins
1890 */
1891void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1892{
1893	if (mca_cfg.disabled)
1894		return;
1895
1896	if (!mce_available(c))
1897		return;
1898
1899	/*
1900	 * Possibly to clear general settings generic to x86
1901	 * __mcheck_cpu_clear_generic(c);
1902	 */
1903	__mcheck_cpu_clear_vendor(c);
1904
1905}
1906
1907static void __mce_disable_bank(void *arg)
1908{
1909	int bank = *((int *)arg);
1910	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
1911	cmci_disable_bank(bank);
1912}
1913
1914void mce_disable_bank(int bank)
1915{
1916	if (bank >= this_cpu_read(mce_num_banks)) {
1917		pr_warn(FW_BUG
1918			"Ignoring request to disable invalid MCA bank %d.\n",
1919			bank);
1920		return;
1921	}
1922	set_bit(bank, mce_banks_ce_disabled);
1923	on_each_cpu(__mce_disable_bank, &bank, 1);
1924}
1925
1926/*
1927 * mce=off Disables machine check
1928 * mce=no_cmci Disables CMCI
1929 * mce=no_lmce Disables LMCE
1930 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1931 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
1932 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1933 *	monarchtimeout is how long to wait for other CPUs on machine
1934 *	check, or 0 to not wait
1935 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
1936 *	and older.
1937 * mce=nobootlog Don't log MCEs from before booting.
1938 * mce=bios_cmci_threshold Don't program the CMCI threshold
1939 * mce=recovery force enable memcpy_mcsafe()
1940 */
1941static int __init mcheck_enable(char *str)
1942{
1943	struct mca_config *cfg = &mca_cfg;
1944
1945	if (*str == 0) {
1946		enable_p5_mce();
1947		return 1;
1948	}
1949	if (*str == '=')
1950		str++;
1951	if (!strcmp(str, "off"))
1952		cfg->disabled = 1;
1953	else if (!strcmp(str, "no_cmci"))
1954		cfg->cmci_disabled = true;
1955	else if (!strcmp(str, "no_lmce"))
1956		cfg->lmce_disabled = 1;
1957	else if (!strcmp(str, "dont_log_ce"))
1958		cfg->dont_log_ce = true;
1959	else if (!strcmp(str, "ignore_ce"))
1960		cfg->ignore_ce = true;
1961	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
1962		cfg->bootlog = (str[0] == 'b');
1963	else if (!strcmp(str, "bios_cmci_threshold"))
1964		cfg->bios_cmci_threshold = 1;
1965	else if (!strcmp(str, "recovery"))
1966		cfg->recovery = 1;
1967	else if (isdigit(str[0])) {
1968		if (get_option(&str, &cfg->tolerant) == 2)
1969			get_option(&str, &(cfg->monarch_timeout));
1970	} else {
1971		pr_info("mce argument %s ignored. Please use /sys\n", str);
1972		return 0;
1973	}
1974	return 1;
1975}
1976__setup("mce", mcheck_enable);
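
/*
 * Example command lines (illustrative values only):
 *
 *	mce=off		disable machine checks completely
 *	mce=no_cmci	keep machine checks but do not use CMCI
 *	mce=2,100	tolerant=2, monarch_timeout=100 microseconds
 *
 * Each keyword above is passed as its own "mce=" parameter; only the
 * numeric form takes the optional ",monarchtimeout" suffix.
 */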
1977
1978int __init mcheck_init(void)
1979{
1980	mcheck_intel_therm_init();
1981	mce_register_decode_chain(&first_nb);
1982	mce_register_decode_chain(&mce_srao_nb);
1983	mce_register_decode_chain(&mce_default_nb);
1984	mcheck_vendor_init_severity();
1985
1986	INIT_WORK(&mce_work, mce_gen_pool_process);
1987	init_irq_work(&mce_irq_work, mce_irq_work_cb);
1988
1989	return 0;
1990}
1991
1992/*
1993 * mce_syscore: PM support
1994 */
1995
1996/*
1997 * Disable machine checks on suspend and shutdown. We can't really handle
1998 * them later.
1999 */
2000static void mce_disable_error_reporting(void)
2001{
2002	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2003	int i;
2004
2005	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2006		struct mce_bank *b = &mce_banks[i];
2007
2008		if (b->init)
2009			wrmsrl(msr_ops.ctl(i), 0);
2010	}
2011	return;
2012}
2013
2014static void vendor_disable_error_reporting(void)
2015{
2016	/*
2017	 * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
2018	 * are socket-wide.
2019	 * Disabling them for just a single offlined CPU is bad, since it will
2020	 * inhibit reporting for all shared resources on the socket like the
2021	 * last level cache (LLC), the integrated memory controller (iMC), etc.
2022	 */
2023	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2024	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2025	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
2026		return;
2027
2028	mce_disable_error_reporting();
2029}
2030
2031static int mce_syscore_suspend(void)
2032{
2033	vendor_disable_error_reporting();
2034	return 0;
2035}
2036
2037static void mce_syscore_shutdown(void)
2038{
2039	vendor_disable_error_reporting();
2040}
2041
2042/*
2043 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2044 * Only one CPU is active at this time, the others get re-added later using
2045 * CPU hotplug:
2046 */
2047static void mce_syscore_resume(void)
2048{
2049	__mcheck_cpu_init_generic();
2050	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2051	__mcheck_cpu_init_clear_banks();
2052}
2053
2054static struct syscore_ops mce_syscore_ops = {
2055	.suspend	= mce_syscore_suspend,
2056	.shutdown	= mce_syscore_shutdown,
2057	.resume		= mce_syscore_resume,
2058};
2059
2060/*
2061 * mce_device: Sysfs support
2062 */
2063
2064static void mce_cpu_restart(void *data)
2065{
2066	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2067		return;
2068	__mcheck_cpu_init_generic();
2069	__mcheck_cpu_init_clear_banks();
2070	__mcheck_cpu_init_timer();
2071}
2072
2073/* Reinit MCEs after user configuration changes */
2074static void mce_restart(void)
2075{
2076	mce_timer_delete_all();
2077	on_each_cpu(mce_cpu_restart, NULL, 1);
2078}
2079
2080/* Toggle features for corrected errors */
2081static void mce_disable_cmci(void *data)
2082{
2083	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2084		return;
2085	cmci_clear();
2086}
2087
2088static void mce_enable_ce(void *all)
2089{
2090	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2091		return;
2092	cmci_reenable();
2093	cmci_recheck();
2094	if (all)
2095		__mcheck_cpu_init_timer();
2096}
2097
2098static struct bus_type mce_subsys = {
2099	.name		= "machinecheck",
2100	.dev_name	= "machinecheck",
2101};
2102
2103DEFINE_PER_CPU(struct device *, mce_device);
2104
2105static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2106{
2107	return container_of(attr, struct mce_bank_dev, attr);
2108}
2109
2110static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2111			 char *buf)
2112{
2113	u8 bank = attr_to_bank(attr)->bank;
2114	struct mce_bank *b;
2115
2116	if (bank >= per_cpu(mce_num_banks, s->id))
2117		return -EINVAL;
2118
2119	b = &per_cpu(mce_banks_array, s->id)[bank];
2120
2121	if (!b->init)
2122		return -ENODEV;
2123
2124	return sprintf(buf, "%llx\n", b->ctl);
2125}
2126
2127static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2128			const char *buf, size_t size)
2129{
2130	u8 bank = attr_to_bank(attr)->bank;
2131	struct mce_bank *b;
2132	u64 new;
2133
2134	if (kstrtou64(buf, 0, &new) < 0)
2135		return -EINVAL;
2136
2137	if (bank >= per_cpu(mce_num_banks, s->id))
2138		return -EINVAL;
2139
2140	b = &per_cpu(mce_banks_array, s->id)[bank];
2141
2142	if (!b->init)
2143		return -ENODEV;
2144
2145	b->ctl = new;
2146	mce_restart();
2147
2148	return size;
2149}
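
/*
 * The bank control values are exposed as one file per bank under each
 * per-CPU machinecheck device, along the lines of (paths and values
 * illustrative):
 *
 *	# cat /sys/devices/system/machinecheck/machinecheck0/bank2
 *	ffffffffffffffff
 *	# echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank2
 *
 * A write updates that CPU's copy in mce_banks_array and triggers
 * mce_restart(), which reprograms MCi_CTL from the per-CPU values.
 */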
2150
2151static ssize_t set_ignore_ce(struct device *s,
2152			     struct device_attribute *attr,
2153			     const char *buf, size_t size)
2154{
2155	u64 new;
2156
2157	if (kstrtou64(buf, 0, &new) < 0)
2158		return -EINVAL;
2159
2160	mutex_lock(&mce_sysfs_mutex);
2161	if (mca_cfg.ignore_ce ^ !!new) {
2162		if (new) {
2163			/* disable ce features */
2164			mce_timer_delete_all();
2165			on_each_cpu(mce_disable_cmci, NULL, 1);
2166			mca_cfg.ignore_ce = true;
2167		} else {
2168			/* enable ce features */
2169			mca_cfg.ignore_ce = false;
2170			on_each_cpu(mce_enable_ce, (void *)1, 1);
2171		}
2172	}
2173	mutex_unlock(&mce_sysfs_mutex);
2174
2175	return size;
2176}
2177
2178static ssize_t set_cmci_disabled(struct device *s,
2179				 struct device_attribute *attr,
2180				 const char *buf, size_t size)
2181{
2182	u64 new;
2183
2184	if (kstrtou64(buf, 0, &new) < 0)
2185		return -EINVAL;
2186
2187	mutex_lock(&mce_sysfs_mutex);
2188	if (mca_cfg.cmci_disabled ^ !!new) {
2189		if (new) {
2190			/* disable cmci */
2191			on_each_cpu(mce_disable_cmci, NULL, 1);
2192			mca_cfg.cmci_disabled = true;
2193		} else {
2194			/* enable cmci */
2195			mca_cfg.cmci_disabled = false;
2196			on_each_cpu(mce_enable_ce, NULL, 1);
2197		}
2198	}
2199	mutex_unlock(&mce_sysfs_mutex);
2200
2201	return size;
2202}
2203
2204static ssize_t store_int_with_restart(struct device *s,
2205				      struct device_attribute *attr,
2206				      const char *buf, size_t size)
2207{
2208	unsigned long old_check_interval = check_interval;
2209	ssize_t ret = device_store_ulong(s, attr, buf, size);
2210
2211	if (check_interval == old_check_interval)
2212		return ret;
2213
2214	mutex_lock(&mce_sysfs_mutex);
2215	mce_restart();
2216	mutex_unlock(&mce_sysfs_mutex);
2217
2218	return ret;
2219}
2220
2221static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2222static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2223static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2224
2225static struct dev_ext_attribute dev_attr_check_interval = {
2226	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2227	&check_interval
2228};
2229
2230static struct dev_ext_attribute dev_attr_ignore_ce = {
2231	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2232	&mca_cfg.ignore_ce
2233};
2234
2235static struct dev_ext_attribute dev_attr_cmci_disabled = {
2236	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2237	&mca_cfg.cmci_disabled
2238};
2239
2240static struct device_attribute *mce_device_attrs[] = {
2241	&dev_attr_tolerant.attr,
2242	&dev_attr_check_interval.attr,
2243#ifdef CONFIG_X86_MCELOG_LEGACY
2244	&dev_attr_trigger,
2245#endif
2246	&dev_attr_monarch_timeout.attr,
2247	&dev_attr_dont_log_ce.attr,
2248	&dev_attr_ignore_ce.attr,
2249	&dev_attr_cmci_disabled.attr,
2250	NULL
2251};
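
/*
 * Together with the per-bank files, this results in a sysfs layout
 * roughly like (illustrative):
 *
 *	/sys/devices/system/machinecheck/machinecheckN/tolerant
 *	/sys/devices/system/machinecheck/machinecheckN/check_interval
 *	/sys/devices/system/machinecheck/machinecheckN/monarch_timeout
 *	/sys/devices/system/machinecheck/machinecheckN/ignore_ce
 *	/sys/devices/system/machinecheck/machinecheckN/cmci_disabled
 *	/sys/devices/system/machinecheck/machinecheckN/bank0 .. bank<n>
 *
 * e.g. "echo 1 > .../machinecheck0/ignore_ce" stops CE polling and CMCI
 * on all CPUs via set_ignore_ce() above.
 */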
2252
2253static cpumask_var_t mce_device_initialized;
2254
2255static void mce_device_release(struct device *dev)
2256{
2257	kfree(dev);
2258}
2259
2260/* Per CPU device init. All of the CPUs still share the same bank device: */
2261static int mce_device_create(unsigned int cpu)
2262{
2263	struct device *dev;
2264	int err;
2265	int i, j;
2266
2267	if (!mce_available(&boot_cpu_data))
2268		return -EIO;
2269
2270	dev = per_cpu(mce_device, cpu);
2271	if (dev)
2272		return 0;
2273
2274	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2275	if (!dev)
2276		return -ENOMEM;
2277	dev->id  = cpu;
2278	dev->bus = &mce_subsys;
2279	dev->release = &mce_device_release;
2280
2281	err = device_register(dev);
2282	if (err) {
2283		put_device(dev);
2284		return err;
2285	}
2286
2287	for (i = 0; mce_device_attrs[i]; i++) {
2288		err = device_create_file(dev, mce_device_attrs[i]);
2289		if (err)
2290			goto error;
2291	}
2292	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2293		err = device_create_file(dev, &mce_bank_devs[j].attr);
2294		if (err)
2295			goto error2;
2296	}
2297	cpumask_set_cpu(cpu, mce_device_initialized);
2298	per_cpu(mce_device, cpu) = dev;
2299
2300	return 0;
2301error2:
2302	while (--j >= 0)
2303		device_remove_file(dev, &mce_bank_devs[j].attr);
2304error:
2305	while (--i >= 0)
2306		device_remove_file(dev, mce_device_attrs[i]);
2307
2308	device_unregister(dev);
2309
2310	return err;
2311}
2312
2313static void mce_device_remove(unsigned int cpu)
2314{
2315	struct device *dev = per_cpu(mce_device, cpu);
2316	int i;
2317
2318	if (!cpumask_test_cpu(cpu, mce_device_initialized))
2319		return;
2320
2321	for (i = 0; mce_device_attrs[i]; i++)
2322		device_remove_file(dev, mce_device_attrs[i]);
2323
2324	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2325		device_remove_file(dev, &mce_bank_devs[i].attr);
2326
2327	device_unregister(dev);
2328	cpumask_clear_cpu(cpu, mce_device_initialized);
2329	per_cpu(mce_device, cpu) = NULL;
2330}
2331
2332/* Make sure there are no machine checks on offlined CPUs. */
2333static void mce_disable_cpu(void)
2334{
2335	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2336		return;
2337
2338	if (!cpuhp_tasks_frozen)
2339		cmci_clear();
2340
2341	vendor_disable_error_reporting();
2342}
2343
2344static void mce_reenable_cpu(void)
2345{
2346	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2347	int i;
2348
2349	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2350		return;
2351
2352	if (!cpuhp_tasks_frozen)
2353		cmci_reenable();
2354	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2355		struct mce_bank *b = &mce_banks[i];
2356
2357		if (b->init)
2358			wrmsrl(msr_ops.ctl(i), b->ctl);
2359	}
2360}
2361
2362static int mce_cpu_dead(unsigned int cpu)
2363{
2364	mce_intel_hcpu_update(cpu);
2365
2366	/* intentionally ignoring frozen here */
2367	if (!cpuhp_tasks_frozen)
2368		cmci_rediscover();
2369	return 0;
2370}
2371
2372static int mce_cpu_online(unsigned int cpu)
2373{
2374	struct timer_list *t = this_cpu_ptr(&mce_timer);
2375	int ret;
2376
2377	mce_device_create(cpu);
2378
2379	ret = mce_threshold_create_device(cpu);
2380	if (ret) {
2381		mce_device_remove(cpu);
2382		return ret;
2383	}
2384	mce_reenable_cpu();
2385	mce_start_timer(t);
2386	return 0;
2387}
2388
2389static int mce_cpu_pre_down(unsigned int cpu)
2390{
2391	struct timer_list *t = this_cpu_ptr(&mce_timer);
2392
2393	mce_disable_cpu();
2394	del_timer_sync(t);
2395	mce_threshold_remove_device(cpu);
2396	mce_device_remove(cpu);
2397	return 0;
2398}
2399
2400static __init void mce_init_banks(void)
2401{
2402	int i;
2403
2404	for (i = 0; i < MAX_NR_BANKS; i++) {
2405		struct mce_bank_dev *b = &mce_bank_devs[i];
2406		struct device_attribute *a = &b->attr;
2407
2408		b->bank = i;
2409
2410		sysfs_attr_init(&a->attr);
2411		a->attr.name	= b->attrname;
2412		snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2413
2414		a->attr.mode	= 0644;
2415		a->show		= show_bank;
2416		a->store	= set_bank;
2417	}
2418}
2419
2420static __init int mcheck_init_device(void)
2421{
2422	int err;
2423
2424	/*
2425	 * Check if we have a spare virtual bit. This will only become
2426	 * a problem if/when we move beyond 5-level page tables.
2427	 */
2428	MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2429
2430	if (!mce_available(&boot_cpu_data)) {
2431		err = -EIO;
2432		goto err_out;
2433	}
2434
2435	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2436		err = -ENOMEM;
2437		goto err_out;
2438	}
2439
2440	mce_init_banks();
2441
2442	err = subsys_system_register(&mce_subsys, NULL);
2443	if (err)
2444		goto err_out_mem;
2445
2446	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2447				mce_cpu_dead);
2448	if (err)
2449		goto err_out_mem;
2450
2451	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2452				mce_cpu_online, mce_cpu_pre_down);
2453	if (err < 0)
2454		goto err_out_online;
2455
2456	register_syscore_ops(&mce_syscore_ops);
2457
2458	return 0;
2459
2460err_out_online:
2461	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2462
2463err_out_mem:
2464	free_cpumask_var(mce_device_initialized);
2465
2466err_out:
2467	pr_err("Unable to init MCE device (rc: %d)\n", err);
2468
2469	return err;
2470}
2471device_initcall_sync(mcheck_init_device);
2472
2473/*
2474 * Old style boot options parsing. Only for compatibility.
2475 */
2476static int __init mcheck_disable(char *str)
2477{
2478	mca_cfg.disabled = 1;
2479	return 1;
2480}
2481__setup("nomce", mcheck_disable);
2482
2483#ifdef CONFIG_DEBUG_FS
2484struct dentry *mce_get_debugfs_dir(void)
2485{
2486	static struct dentry *dmce;
2487
2488	if (!dmce)
2489		dmce = debugfs_create_dir("mce", NULL);
2490
2491	return dmce;
2492}
2493
2494static void mce_reset(void)
2495{
2496	cpu_missing = 0;
2497	atomic_set(&mce_fake_panicked, 0);
2498	atomic_set(&mce_executing, 0);
2499	atomic_set(&mce_callin, 0);
2500	atomic_set(&global_nwo, 0);
2501}
2502
2503static int fake_panic_get(void *data, u64 *val)
2504{
2505	*val = fake_panic;
2506	return 0;
2507}
2508
2509static int fake_panic_set(void *data, u64 val)
2510{
2511	mce_reset();
2512	fake_panic = val;
2513	return 0;
2514}
2515
2516DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2517			 "%llu\n");
2518
2519static void __init mcheck_debugfs_init(void)
2520{
2521	struct dentry *dmce;
2522
2523	dmce = mce_get_debugfs_dir();
2524	debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2525				   &fake_panic_fops);
2526}
2527#else
2528static void __init mcheck_debugfs_init(void) { }
2529#endif
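
/*
 * With CONFIG_DEBUG_FS and debugfs mounted in the usual place, the
 * directory above shows up as /sys/kernel/debug/mce/, e.g.
 * (illustrative):
 *
 *	# cat /sys/kernel/debug/mce/fake_panic
 *	0
 *
 * A non-zero fake_panic value makes mce_panic() only print instead of
 * actually panicking, which helps when testing the panic paths.
 */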
2530
2531DEFINE_STATIC_KEY_FALSE(mcsafe_key);
2532EXPORT_SYMBOL_GPL(mcsafe_key);
2533
2534static int __init mcheck_late_init(void)
2535{
2536	if (mca_cfg.recovery)
2537		static_branch_inc(&mcsafe_key);
2538
2539	mcheck_debugfs_init();
2540	cec_init();
2541
2542	/*
2543	 * Flush out everything that has been logged during early boot, now that
2544	 * everything has been initialized (workqueues, decoders, ...).
2545	 */
2546	mce_schedule_work();
2547
2548	return 0;
2549}
2550late_initcall(mcheck_late_init);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Machine check handler.
   4 *
   5 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
   6 * Rest from unknown author(s).
   7 * 2004 Andi Kleen. Rewrote most of it.
   8 * Copyright 2008 Intel Corporation
   9 * Author: Andi Kleen
  10 */
  11
  12#include <linux/thread_info.h>
  13#include <linux/capability.h>
  14#include <linux/miscdevice.h>
  15#include <linux/ratelimit.h>
  16#include <linux/rcupdate.h>
  17#include <linux/kobject.h>
  18#include <linux/uaccess.h>
  19#include <linux/kdebug.h>
  20#include <linux/kernel.h>
  21#include <linux/percpu.h>
  22#include <linux/string.h>
  23#include <linux/device.h>
  24#include <linux/syscore_ops.h>
  25#include <linux/delay.h>
  26#include <linux/ctype.h>
  27#include <linux/sched.h>
  28#include <linux/sysfs.h>
  29#include <linux/types.h>
  30#include <linux/slab.h>
  31#include <linux/init.h>
  32#include <linux/kmod.h>
  33#include <linux/poll.h>
  34#include <linux/nmi.h>
  35#include <linux/cpu.h>
  36#include <linux/ras.h>
  37#include <linux/smp.h>
  38#include <linux/fs.h>
  39#include <linux/mm.h>
  40#include <linux/debugfs.h>
  41#include <linux/irq_work.h>
  42#include <linux/export.h>
  43#include <linux/set_memory.h>
  44#include <linux/sync_core.h>
  45#include <linux/task_work.h>
  46#include <linux/hardirq.h>
  47#include <linux/kexec.h>
  48
  49#include <asm/intel-family.h>
  50#include <asm/processor.h>
  51#include <asm/traps.h>
  52#include <asm/tlbflush.h>
  53#include <asm/mce.h>
  54#include <asm/msr.h>
  55#include <asm/reboot.h>
  56#include <asm/tdx.h>
  57
  58#include "internal.h"
  59
  60/* sysfs synchronization */
  61static DEFINE_MUTEX(mce_sysfs_mutex);
  62
  63#define CREATE_TRACE_POINTS
  64#include <trace/events/mce.h>
  65
  66#define SPINUNIT		100	/* 100ns */
  67
  68DEFINE_PER_CPU(unsigned, mce_exception_count);
  69
  70DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
  71
  72DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
  73
  74#define ATTR_LEN               16
  75/* One object for each MCE bank, shared by all CPUs */
  76struct mce_bank_dev {
  77	struct device_attribute	attr;			/* device attribute */
  78	char			attrname[ATTR_LEN];	/* attribute name */
  79	u8			bank;			/* bank number */
  80};
  81static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
  82
  83struct mce_vendor_flags mce_flags __read_mostly;
  84
  85struct mca_config mca_cfg __read_mostly = {
  86	.bootlog  = -1,
  87	.monarch_timeout = -1
  88};
  89
  90static DEFINE_PER_CPU(struct mce, mces_seen);
  91static unsigned long mce_need_notify;
  92
  93/*
  94 * MCA banks polled by the period polling timer for corrected events.
  95 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
  96 */
  97DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
  98	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
  99};
 100
 101/*
 102 * MCA banks controlled through firmware first for corrected errors.
 103 * This is a global list of banks for which we won't enable CMCI and we
 104 * won't poll. Firmware controls these banks and is responsible for
 105 * reporting corrected errors through GHES. Uncorrected/recoverable
 106 * errors are still notified through a machine check.
 107 */
 108mce_banks_t mce_banks_ce_disabled;
 109
 110static struct work_struct mce_work;
 111static struct irq_work mce_irq_work;
 112
 113/*
 114 * CPU/chipset specific EDAC code can register a notifier call here to print
 115 * MCE errors in a human-readable form.
 116 */
 117BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 118
 119/* Do initial initialization of a struct mce */
 120void mce_setup(struct mce *m)
 121{
 122	memset(m, 0, sizeof(struct mce));
 123	m->cpu = m->extcpu = smp_processor_id();
 124	/* need the internal __ version to avoid deadlocks */
 125	m->time = __ktime_get_real_seconds();
 126	m->cpuvendor = boot_cpu_data.x86_vendor;
 127	m->cpuid = cpuid_eax(1);
 128	m->socketid = cpu_data(m->extcpu).topo.pkg_id;
 129	m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
 130	m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
 131	m->ppin = cpu_data(m->extcpu).ppin;
 132	m->microcode = boot_cpu_data.microcode;
 133}
 134
 135DEFINE_PER_CPU(struct mce, injectm);
 136EXPORT_PER_CPU_SYMBOL_GPL(injectm);
 137
 138void mce_log(struct mce *m)
 139{
 140	if (!mce_gen_pool_add(m))
 141		irq_work_queue(&mce_irq_work);
 142}
 143EXPORT_SYMBOL_GPL(mce_log);
 144
 145void mce_register_decode_chain(struct notifier_block *nb)
 146{
 147	if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
 148		    nb->priority > MCE_PRIO_HIGHEST))
 149		return;
 150
 151	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 152}
 153EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 154
 155void mce_unregister_decode_chain(struct notifier_block *nb)
 156{
 157	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 158}
 159EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
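
/*
 * A minimal sketch of a decode-chain consumer. The notifier below is
 * hypothetical; only the registration API and the MCE_PRIO_* constants
 * are real:
 *
 *	static int example_mce_notify(struct notifier_block *nb,
 *				      unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		if (m && mce_is_memory_error(m))
 *			pr_info("memory MCE on CPU %u bank %d\n",
 *				m->extcpu, m->bank);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_mce_nb = {
 *		.notifier_call	= example_mce_notify,
 *		.priority	= MCE_PRIO_EDAC,
 *	};
 *
 *	mce_register_decode_chain(&example_mce_nb);
 */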
 160
 161static void __print_mce(struct mce *m)
 162{
 163	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
 164		 m->extcpu,
 165		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
 166		 m->mcgstatus, m->bank, m->status);
 167
 168	if (m->ip) {
 169		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
 170			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 171			m->cs, m->ip);
 172
 173		if (m->cs == __KERNEL_CS)
 174			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 175		pr_cont("\n");
 176	}
 177
 178	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
 179	if (m->addr)
 180		pr_cont("ADDR %llx ", m->addr);
 181	if (m->misc)
 182		pr_cont("MISC %llx ", m->misc);
 183	if (m->ppin)
 184		pr_cont("PPIN %llx ", m->ppin);
 185
 186	if (mce_flags.smca) {
 187		if (m->synd)
 188			pr_cont("SYND %llx ", m->synd);
 189		if (m->ipid)
 190			pr_cont("IPID %llx ", m->ipid);
 191	}
 192
 193	pr_cont("\n");
 194
 195	/*
 196	 * Note this output is parsed by external tools and old fields
 197	 * should not be changed.
 198	 */
 199	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 200		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
 201		m->microcode);
 202}
 203
 204static void print_mce(struct mce *m)
 205{
 206	__print_mce(m);
 207
 208	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 209		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 210}
 211
 212#define PANIC_TIMEOUT 5 /* 5 seconds */
 213
 214static atomic_t mce_panicked;
 215
 216static int fake_panic;
 217static atomic_t mce_fake_panicked;
 218
 219/* Panic in progress. Enable interrupts and wait for final IPI */
 220static void wait_for_panic(void)
 221{
 222	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
 223
 224	preempt_disable();
 225	local_irq_enable();
 226	while (timeout-- > 0)
 227		udelay(1);
 228	if (panic_timeout == 0)
 229		panic_timeout = mca_cfg.panic_timeout;
 230	panic("Panicking machine check CPU died");
 231}
 232
 233static const char *mce_dump_aux_info(struct mce *m)
 234{
 235	if (boot_cpu_has_bug(X86_BUG_TDX_PW_MCE))
 236		return tdx_dump_mce_info(m);
 237
 238	return NULL;
 239}
 240
 241static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
 242{
 243	struct llist_node *pending;
 244	struct mce_evt_llist *l;
 245	int apei_err = 0;
 246	const char *memmsg;
 247
 248	/*
 249	 * Allow instrumentation around external facilities usage. Not that it
 250	 * matters a whole lot since the machine is going to panic anyway.
 251	 */
 252	instrumentation_begin();
 253
 254	if (!fake_panic) {
 255		/*
 256		 * Make sure only one CPU runs in machine check panic
 257		 */
 258		if (atomic_inc_return(&mce_panicked) > 1)
 259			wait_for_panic();
 260		barrier();
 261
 262		bust_spinlocks(1);
 263		console_verbose();
 264	} else {
 265		/* Don't log too much for fake panic */
 266		if (atomic_inc_return(&mce_fake_panicked) > 1)
 267			goto out;
 268	}
 269	pending = mce_gen_pool_prepare_records();
 270	/* First print corrected ones that are still unlogged */
 271	llist_for_each_entry(l, pending, llnode) {
 272		struct mce *m = &l->mce;
 273		if (!(m->status & MCI_STATUS_UC)) {
 274			print_mce(m);
 275			if (!apei_err)
 276				apei_err = apei_write_mce(m);
 277		}
 278	}
 279	/* Now print uncorrected but with the final one last */
 280	llist_for_each_entry(l, pending, llnode) {
 281		struct mce *m = &l->mce;
 282		if (!(m->status & MCI_STATUS_UC))
 283			continue;
 284		if (!final || mce_cmp(m, final)) {
 285			print_mce(m);
 286			if (!apei_err)
 287				apei_err = apei_write_mce(m);
 288		}
 289	}
 290	if (final) {
 291		print_mce(final);
 292		if (!apei_err)
 293			apei_err = apei_write_mce(final);
 294	}
 295	if (exp)
 296		pr_emerg(HW_ERR "Machine check: %s\n", exp);
 297
 298	memmsg = mce_dump_aux_info(final);
 299	if (memmsg)
 300		pr_emerg(HW_ERR "Machine check: %s\n", memmsg);
 301
 302	if (!fake_panic) {
 303		if (panic_timeout == 0)
 304			panic_timeout = mca_cfg.panic_timeout;
 305
 306		/*
 307		 * Kdump skips the poisoned page in order to avoid
 308		 * touching the error bits again. Poison the page even
 309		 * if the error is fatal and the machine is about to
 310		 * panic.
 311		 */
 312		if (kexec_crash_loaded()) {
 313			if (final && (final->status & MCI_STATUS_ADDRV)) {
 314				struct page *p;
 315				p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
 316				if (p)
 317					SetPageHWPoison(p);
 318			}
 319		}
 320		panic(msg);
 321	} else
 322		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 323
 324out:
 325	instrumentation_end();
 326}
 327
 328/* Support code for software error injection */
 329
 330static int msr_to_offset(u32 msr)
 331{
 332	unsigned bank = __this_cpu_read(injectm.bank);
 333
 334	if (msr == mca_cfg.rip_msr)
 335		return offsetof(struct mce, ip);
 336	if (msr == mca_msr_reg(bank, MCA_STATUS))
 337		return offsetof(struct mce, status);
 338	if (msr == mca_msr_reg(bank, MCA_ADDR))
 339		return offsetof(struct mce, addr);
 340	if (msr == mca_msr_reg(bank, MCA_MISC))
 341		return offsetof(struct mce, misc);
 342	if (msr == MSR_IA32_MCG_STATUS)
 343		return offsetof(struct mce, mcgstatus);
 344	return -1;
 345}
 346
 347void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
 348{
 349	if (wrmsr) {
 350		pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
 351			 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
 352			 regs->ip, (void *)regs->ip);
 353	} else {
 354		pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
 355			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
 356	}
 357
 358	show_stack_regs(regs);
 359
 360	panic("MCA architectural violation!\n");
 361
 362	while (true)
 363		cpu_relax();
 364}
 365
 366/* MSR access wrappers used for error injection */
 367noinstr u64 mce_rdmsrl(u32 msr)
 368{
 369	DECLARE_ARGS(val, low, high);
 370
 371	if (__this_cpu_read(injectm.finished)) {
 372		int offset;
 373		u64 ret;
 374
 375		instrumentation_begin();
 376
 377		offset = msr_to_offset(msr);
 378		if (offset < 0)
 379			ret = 0;
 380		else
 381			ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 382
 383		instrumentation_end();
 384
 385		return ret;
 386	}
 387
 388	/*
 389	 * RDMSR on MCA MSRs should not fault. If they do, this is very much an
 390	 * architectural violation and needs to be reported to hw vendor. Panic
 391	 * the box to not allow any further progress.
 392	 */
 393	asm volatile("1: rdmsr\n"
 394		     "2:\n"
 395		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE)
 396		     : EAX_EDX_RET(val, low, high) : "c" (msr));
 397
 398
 399	return EAX_EDX_VAL(val, low, high);
 400}
 401
 402static noinstr void mce_wrmsrl(u32 msr, u64 v)
 403{
 404	u32 low, high;
 405
 406	if (__this_cpu_read(injectm.finished)) {
 407		int offset;
 408
 409		instrumentation_begin();
 410
 411		offset = msr_to_offset(msr);
 412		if (offset >= 0)
 413			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 414
 415		instrumentation_end();
 416
 417		return;
 418	}
 419
 420	low  = (u32)v;
 421	high = (u32)(v >> 32);
 422
 423	/* See comment in mce_rdmsrl() */
 424	asm volatile("1: wrmsr\n"
 425		     "2:\n"
 426		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE)
 427		     : : "c" (msr), "a"(low), "d" (high) : "memory");
 428}
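
/*
 * These wrappers are what makes software error injection work: the
 * injector (e.g. the mce-inject module) fills the per-CPU "injectm"
 * template and sets ->finished, after which MCA MSR reads and writes on
 * that CPU are redirected to the struct instead of real hardware.
 * Rough sketch (values illustrative):
 *
 *	struct mce *i = this_cpu_ptr(&injectm);
 *
 *	i->finished = 0;
 *	i->bank     = 1;
 *	i->status   = MCI_STATUS_VAL | MCI_STATUS_UC;
 *	i->finished = 1;
 *
 * mce_rdmsrl(mca_msr_reg(1, MCA_STATUS)) now returns i->status.
 */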
 429
 430/*
 431 * Collect all global (w.r.t. this processor) status about this machine
 432 * check into our "mce" struct so that we can use it later to assess
 433 * the severity of the problem as we read per-bank specific details.
 434 */
 435static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs)
 436{
 437	/*
 438	 * Enable instrumentation around mce_setup() which calls external
 439	 * facilities.
 440	 */
 441	instrumentation_begin();
 442	mce_setup(m);
 443	instrumentation_end();
 444
 445	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
 446	if (regs) {
 447		/*
 448		 * Get the address of the instruction at the time of
 449		 * the machine check error.
 450		 */
 451		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
 452			m->ip = regs->ip;
 453			m->cs = regs->cs;
 454
 455			/*
 456			 * When in VM86 mode make the cs look like ring 3
 457			 * always. This is a lie, but it's better than passing
 458			 * the additional vm86 bit around everywhere.
 459			 */
 460			if (v8086_mode(regs))
 461				m->cs |= 3;
 462		}
 463		/* Use accurate RIP reporting if available. */
 464		if (mca_cfg.rip_msr)
 465			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
 466	}
 467}
 468
 469int mce_available(struct cpuinfo_x86 *c)
 470{
 471	if (mca_cfg.disabled)
 472		return 0;
 473	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 474}
 475
 476static void mce_schedule_work(void)
 477{
 478	if (!mce_gen_pool_empty())
 479		schedule_work(&mce_work);
 480}
 481
 482static void mce_irq_work_cb(struct irq_work *entry)
 483{
 484	mce_schedule_work();
 485}
 486
 487bool mce_usable_address(struct mce *m)
 488{
 489	if (!(m->status & MCI_STATUS_ADDRV))
 490		return false;
 491
 492	switch (m->cpuvendor) {
 493	case X86_VENDOR_AMD:
 494		return amd_mce_usable_address(m);
 495
 496	case X86_VENDOR_INTEL:
 497	case X86_VENDOR_ZHAOXIN:
 498		return intel_mce_usable_address(m);
 499
 500	default:
 501		return true;
 502	}
 503}
 504EXPORT_SYMBOL_GPL(mce_usable_address);
 505
 506bool mce_is_memory_error(struct mce *m)
 507{
 508	switch (m->cpuvendor) {
 509	case X86_VENDOR_AMD:
 510	case X86_VENDOR_HYGON:
 511		return amd_mce_is_memory_error(m);
 512
 513	case X86_VENDOR_INTEL:
 514	case X86_VENDOR_ZHAOXIN:
 515		/*
 516		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 517		 *
 518		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
 519		 * indicating a memory error. Bit 8 is used for indicating a
 520		 * cache hierarchy error. The combination of bit 2 and bit 3
 521 * is used for indicating a `generic' cache hierarchy error.
 522 * But we can't just blindly check the above bits, because if
 523 * bit 11 is set, then it is a bus/interconnect error - and
 524 * either way the above bits just give more detail on what
 525		 * bus/interconnect error happened. Note that bit 12 can be
 526		 * ignored, as it's the "filter" bit.
 527		 */
 528		return (m->status & 0xef80) == BIT(7) ||
 529		       (m->status & 0xef00) == BIT(8) ||
 530		       (m->status & 0xeffc) == 0xc;
 531
 532	default:
 533		return false;
 534	}
 535}
 536EXPORT_SYMBOL_GPL(mce_is_memory_error);
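
/*
 * Worked example for the Intel/Zhaoxin branch above (MCACOD values
 * illustrative): a memory-read MCACOD of 0x0090 gives
 * 0x0090 & 0xef80 == 0x0080 == BIT(7), so it is classified as a memory
 * error, while a bus/interconnect MCACOD such as 0x0e0b gives
 * 0x0e0b & 0xef80 == 0x0e00 and falls through to "not memory".
 */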
 537
 538static bool whole_page(struct mce *m)
 539{
 540	if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
 541		return true;
 542
 543	return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
 544}
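
/*
 * Example (illustrative numbers): MCI_MISC_ADDR_LSB(m->misc) == 6 means
 * the reported granularity is a 64-byte cache line, smaller than a page,
 * so whole_page() returns false; an LSB of 12 or more means at least a
 * full 4K page is affected and whole_page() returns true.
 */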
 545
 546bool mce_is_correctable(struct mce *m)
 547{
 548	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 549		return false;
 550
 551	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
 552		return false;
 553
 554	if (m->status & MCI_STATUS_UC)
 555		return false;
 556
 557	return true;
 558}
 559EXPORT_SYMBOL_GPL(mce_is_correctable);
 560
 561static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
 562			      void *data)
 563{
 564	struct mce *m = (struct mce *)data;
 565
 566	if (!m)
 567		return NOTIFY_DONE;
 568
 569	/* Emit the trace record: */
 570	trace_mce_record(m);
 571
 572	set_bit(0, &mce_need_notify);
 573
 574	mce_notify_irq();
 575
 576	return NOTIFY_DONE;
 577}
 578
 579static struct notifier_block early_nb = {
 580	.notifier_call	= mce_early_notifier,
 581	.priority	= MCE_PRIO_EARLY,
 582};
 583
 584static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
 585			      void *data)
 586{
 587	struct mce *mce = (struct mce *)data;
 588	unsigned long pfn;
 589
 590	if (!mce || !mce_usable_address(mce))
 591		return NOTIFY_DONE;
 592
 593	if (mce->severity != MCE_AO_SEVERITY &&
 594	    mce->severity != MCE_DEFERRED_SEVERITY)
 595		return NOTIFY_DONE;
 596
 597	pfn = (mce->addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
 598	if (!memory_failure(pfn, 0)) {
 599		set_mce_nospec(pfn);
 600		mce->kflags |= MCE_HANDLED_UC;
 601	}
 602
 603	return NOTIFY_OK;
 604}
 605
 606static struct notifier_block mce_uc_nb = {
 607	.notifier_call	= uc_decode_notifier,
 608	.priority	= MCE_PRIO_UC,
 609};
 610
 611static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 612				void *data)
 613{
 614	struct mce *m = (struct mce *)data;
 615
 616	if (!m)
 617		return NOTIFY_DONE;
 618
 619	if (mca_cfg.print_all || !m->kflags)
 620		__print_mce(m);
 621
 622	return NOTIFY_DONE;
 623}
 624
 625static struct notifier_block mce_default_nb = {
 626	.notifier_call	= mce_default_notifier,
 627	/* lowest prio, we want it to run last. */
 628	.priority	= MCE_PRIO_LOWEST,
 629};
 630
 631/*
 632 * Read ADDR and MISC registers.
 633 */
 634static noinstr void mce_read_aux(struct mce *m, int i)
 635{
 636	if (m->status & MCI_STATUS_MISCV)
 637		m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));
 638
 639	if (m->status & MCI_STATUS_ADDRV) {
 640		m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));
 641
 642		/*
 643		 * Mask the reported address by the reported granularity.
 644		 */
 645		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
 646			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
 647			m->addr >>= shift;
 648			m->addr <<= shift;
 649		}
 650
 651		smca_extract_err_addr(m);
 652	}
 653
 654	if (mce_flags.smca) {
 655		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
 656
 657		if (m->status & MCI_STATUS_SYNDV)
 658			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
 659	}
 660}
 661
 662DEFINE_PER_CPU(unsigned, mce_poll_count);
 663
 664/*
 665 * Poll for corrected events or events that happened before reset.
 666 * Those are just logged through /dev/mcelog.
 667 *
 668 * This is executed in standard interrupt context.
 669 *
 670 * Note: spec recommends to panic for fatal unsignalled
 671 * errors here. However this would be quite problematic --
 672 * we would need to reimplement the Monarch handling and
 673 * it would mess up the exclusion between exception handler
 674 * and poll handler -- so we skip this for now.
 675 * These cases should not happen anyway, or only when the CPU
 676 * is already totally confused. In this case it's likely it will
 677 * not fully execute the machine check handler either.
 678 */
 679bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 680{
 681	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
 682	bool error_seen = false;
 683	struct mce m;
 684	int i;
 685
 686	this_cpu_inc(mce_poll_count);
 687
 688	mce_gather_info(&m, NULL);
 689
 690	if (flags & MCP_TIMESTAMP)
 691		m.tsc = rdtsc();
 692
 693	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 694		if (!mce_banks[i].ctl || !test_bit(i, *b))
 695			continue;
 696
 697		m.misc = 0;
 698		m.addr = 0;
 699		m.bank = i;
 700
 701		barrier();
 702		m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
 703
 704		/*
 705		 * Update storm tracking here, before checking for the
 706		 * MCI_STATUS_VAL bit. Valid corrected errors count
 707		 * towards declaring, or maintaining, storm status. No
 708		 * error in a bank counts towards avoiding, or ending,
 709		 * storm status.
 710		 */
 711		if (!mca_cfg.cmci_disabled)
 712			mce_track_storm(&m);
 713
 714		/* If this entry is not valid, ignore it */
 715		if (!(m.status & MCI_STATUS_VAL))
 716			continue;
 717
 718		/*
 719		 * If we are logging everything (at CPU online) or this
 720		 * is a corrected error, then we must log it.
 721		 */
 722		if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
 723			goto log_it;
 724
 725		/*
 726		 * Newer Intel systems that support software error
 727		 * recovery need to make additional checks. Other
 728		 * CPUs should skip over uncorrected errors, but log
 729		 * everything else.
 730		 */
 731		if (!mca_cfg.ser) {
 732			if (m.status & MCI_STATUS_UC)
 733				continue;
 734			goto log_it;
 735		}
 736
 737		/* Log "not enabled" (speculative) errors */
 738		if (!(m.status & MCI_STATUS_EN))
 739			goto log_it;
 740
 741		/*
 742		 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
 743		 * UC == 1 && PCC == 0 && S == 0
 744		 */
 745		if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
 746			goto log_it;
 747
 748		/*
 749		 * Skip anything else. Presumption is that our read of this
 750		 * bank is racing with a machine check. Leave the log alone
 751		 * for do_machine_check() to deal with it.
 752		 */
 753		continue;
 754
 755log_it:
 756		error_seen = true;
 757
 758		if (flags & MCP_DONTLOG)
 759			goto clear_it;
 760
 761		mce_read_aux(&m, i);
 762		m.severity = mce_severity(&m, NULL, NULL, false);
 763		/*
 764		 * Don't get the IP here because it's unlikely to
 765		 * have anything to do with the actual error location.
 766		 */
 767
 768		if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
 769			goto clear_it;
 770
 771		if (flags & MCP_QUEUE_LOG)
 772			mce_gen_pool_add(&m);
 773		else
 774			mce_log(&m);
 775
 776clear_it:
 777		/*
 778		 * Clear state for this bank.
 779		 */
 780		mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
 781	}
 782
 783	/*
 784	 * Don't clear MCG_STATUS here because it's only defined for
 785	 * exceptions.
 786	 */
 787
 788	sync_core();
 789
 790	return error_seen;
 791}
 792EXPORT_SYMBOL_GPL(machine_check_poll);
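
/*
 * Sketch of how the polling callers use this; the real callers (the
 * periodic timer and the CMCI interrupt) differ in the flags and the
 * bank mask they pass:
 *
 *	polled = machine_check_poll(MCP_TIMESTAMP,
 *				    this_cpu_ptr(&mce_poll_banks));
 *
 * A true return value means at least one event was seen and logged.
 */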
 793
 794/*
 795 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 796 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 797 * Vol 3B Table 15-20). But this confuses both the code that determines
 798 * whether the machine check occurred in kernel or user mode, and also
 799 * the severity assessment code. Pretend that EIPV was set, and take the
 800 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 801 */
 802static __always_inline void
 803quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
 804{
 805	if (bank != 0)
 806		return;
 807	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
 808		return;
 809	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
 810		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
 811			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
 812			  MCACOD)) !=
 813			 (MCI_STATUS_UC|MCI_STATUS_EN|
 814			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
 815			  MCI_STATUS_AR|MCACOD_INSTR))
 816		return;
 817
 818	m->mcgstatus |= MCG_STATUS_EIPV;
 819	m->ip = regs->ip;
 820	m->cs = regs->cs;
 821}
 822
 823/*
 824 * Disable fast string copy and return from the MCE handler upon the first SRAR
 825 * MCE on bank 1 due to a CPU erratum on Intel Skylake/Cascade Lake/Cooper Lake
 826 * CPUs.
 827 * The fast string copy instructions ("REP; MOVS*") could consume an
 828 * uncorrectable memory error in the cache line _right after_ the desired region
 829 * to copy and raise an MCE with RIP pointing to the instruction _after_ the
 830 * "REP; MOVS*".
 831 * This mitigation addresses the issue completely with the caveat of performance
 832 * degradation on the CPU affected. This is still better than the OS crashing on
 833 * MCEs raised on an irrelevant process due to "REP; MOVS*" accesses from a
 834 * kernel context (e.g., copy_page).
 835 *
 836 * Returns true when fast string copy on CPU has been disabled.
 837 */
 838static noinstr bool quirk_skylake_repmov(void)
 839{
 840	u64 mcgstatus   = mce_rdmsrl(MSR_IA32_MCG_STATUS);
 841	u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE);
 842	u64 mc1_status;
 843
 844	/*
 845	 * Apply the quirk only to local machine checks, i.e., no broadcast
 846	 * sync is needed.
 847	 */
 848	if (!(mcgstatus & MCG_STATUS_LMCES) ||
 849	    !(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING))
 850		return false;
 851
 852	mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1));
 853
 854	/* Check for a software-recoverable data fetch error. */
 855	if ((mc1_status &
 856	     (MCI_STATUS_VAL | MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_EN |
 857	      MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_PCC |
 858	      MCI_STATUS_AR | MCI_STATUS_S)) ==
 859	     (MCI_STATUS_VAL |                   MCI_STATUS_UC | MCI_STATUS_EN |
 860	      MCI_STATUS_ADDRV | MCI_STATUS_MISCV |
 861	      MCI_STATUS_AR | MCI_STATUS_S)) {
 862		misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
 863		mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 864		mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0);
 865
 866		instrumentation_begin();
 867		pr_err_once("Erratum detected, disable fast string copy instructions.\n");
 868		instrumentation_end();
 869
 870		return true;
 871	}
 872
 873	return false;
 874}
 875
 876/*
 877 * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption
 878 * errors. This means mce_gather_info() will not save the "ip" and "cs" registers.
 879 *
 880 * However, the context is still valid, so save the "cs" register for later use.
 881 *
 882 * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV.
 883 *
 884 * The Instruction Fetch Unit is at MCA bank 1 for all affected systems.
 885 */
 886static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs)
 887{
 888	if (bank != 1)
 889		return;
 890	if (!(m->status & MCI_STATUS_POISON))
 891		return;
 892
 893	m->cs = regs->cs;
 894}
 895
 896/*
 897 * Do a quick check if any of the events requires a panic.
 898 * This decides if we keep the events around or clear them.
 899 */
 900static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 901					  struct pt_regs *regs)
 902{
 903	char *tmp = *msg;
 904	int i;
 905
 906	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 907		m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
 908		if (!(m->status & MCI_STATUS_VAL))
 909			continue;
 910
 911		arch___set_bit(i, validp);
 912		if (mce_flags.snb_ifu_quirk)
 913			quirk_sandybridge_ifu(i, m, regs);
 914
 915		if (mce_flags.zen_ifu_quirk)
 916			quirk_zen_ifu(i, m, regs);
 917
 918		m->bank = i;
 919		if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) {
 920			mce_read_aux(m, i);
 921			*msg = tmp;
 922			return 1;
 923		}
 924	}
 925	return 0;
 926}
 927
 928/*
 929 * Variable to establish order between CPUs while scanning.
 930 * Each CPU spins initially until executing is equal its number.
 931 */
 932static atomic_t mce_executing;
 933
 934/*
 935 * Defines order of CPUs on entry. First CPU becomes Monarch.
 936 */
 937static atomic_t mce_callin;
 938
 939/*
 940 * Track which CPUs entered the MCA broadcast synchronization and which did not, in
 941 * order to print holdouts.
 942 */
 943static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
 944
 945/*
 946 * Check if a timeout waiting for other CPUs happened.
 947 */
 948static noinstr int mce_timed_out(u64 *t, const char *msg)
 949{
 950	int ret = 0;
 951
 952	/* Enable instrumentation around calls to external facilities */
 953	instrumentation_begin();
 954
 955	/*
 956	 * The others already did panic for some reason.
 957	 * Bail out like in a timeout.
 958	 * rmb() to tell the compiler that system_state
 959	 * might have been modified by someone else.
 960	 */
 961	rmb();
 962	if (atomic_read(&mce_panicked))
 963		wait_for_panic();
 964	if (!mca_cfg.monarch_timeout)
 965		goto out;
 966	if ((s64)*t < SPINUNIT) {
 967		if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
 968			pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
 969				 cpumask_pr_args(&mce_missing_cpus));
 970		mce_panic(msg, NULL, NULL);
 971
 972		ret = 1;
 973		goto out;
 974	}
 975	*t -= SPINUNIT;
 976
 977out:
 978	touch_nmi_watchdog();
 979
 980	instrumentation_end();
 981
 982	return ret;
 983}
 984
 985/*
 986 * The Monarch's reign.  The Monarch is the CPU who entered
 987 * the machine check handler first. It waits for the others to
 988 * raise the exception too and then grades them. When any
 989 * error is fatal panic. Only then let the others continue.
 990 *
 991 * The other CPUs entering the MCE handler will be controlled by the
 992 * Monarch. They are called Subjects.
 993 *
 994 * This way we prevent any potential data corruption in an unrecoverable case
 995 * and also make sure that all CPUs' errors are examined.
 996 *
 997 * Also this detects the case of a machine check event coming from outer
 998 * space (not detected by any CPU). In this case some external agent wants
 999 * us to shut down, so panic too.
1000 *
1001 * The other CPUs might still decide to panic if the handler happens
1002 * in an unrecoverable place, but in this case the system is in a semi-stable
1003 * state and won't corrupt anything by itself. It's OK to let the others
1004 * continue for a bit first.
1005 *
1006 * All the spin loops have timeouts; when a timeout happens a CPU
1007 * typically elects itself to be Monarch.
1008 */
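
/*
 * Simplified sketch of a broadcast #MC with the rendezvous described
 * above (timing and ordering approximate):
 *
 *	Monarch (first in mce_callin)	Subjects
 *	mce_start(): wait for all	mce_start(): wait for all
 *	scan own banks			spin until mce_executing == order
 *	mce_end(): wait for Subjects	scan own banks
 *	mce_reign(): grade, maybe panic	mce_end(): wait for Monarch
 *	reset state, release Subjects	resume
 */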
1009static void mce_reign(void)
1010{
1011	int cpu;
1012	struct mce *m = NULL;
1013	int global_worst = 0;
1014	char *msg = NULL;
1015
1016	/*
1017	 * This CPU is the Monarch and the other CPUs have run
1018	 * through their handlers.
1019	 * Grade the severity of the errors of all the CPUs.
1020	 */
1021	for_each_possible_cpu(cpu) {
1022		struct mce *mtmp = &per_cpu(mces_seen, cpu);
1023
1024		if (mtmp->severity > global_worst) {
1025			global_worst = mtmp->severity;
1026			m = &per_cpu(mces_seen, cpu);
1027		}
1028	}
1029
1030	/*
1031	 * Cannot recover? Panic here then.
1032	 * This dumps all the mces in the log buffer and stops the
1033	 * other CPUs.
1034	 */
1035	if (m && global_worst >= MCE_PANIC_SEVERITY) {
1036		/* call mce_severity() to get "msg" for panic */
1037		mce_severity(m, NULL, &msg, true);
1038		mce_panic("Fatal machine check", m, msg);
1039	}
1040
1041	/*
1042	 * For UC somewhere we let the CPU who detects it handle it.
1043	 * Also must let continue the others, otherwise the handling
1044	 * CPU could deadlock on a lock.
1045	 */
1046
1047	/*
1048	 * No machine check event found. Must be some external
1049	 * source or one CPU is hung. Panic.
1050	 */
1051	if (global_worst <= MCE_KEEP_SEVERITY)
1052		mce_panic("Fatal machine check from unknown source", NULL, NULL);
1053
1054	/*
1055	 * Now clear all the mces_seen so that they don't reappear on
1056	 * the next mce.
1057	 */
1058	for_each_possible_cpu(cpu)
1059		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
1060}
1061
1062static atomic_t global_nwo;
1063
1064/*
1065 * Start of Monarch synchronization. This waits until all CPUs have
1066 * entered the exception handler and then determines if any of them
1067 * saw a fatal event that requires panic. Then it executes them
1068 * in the entry order.
1069 * TBD double check parallel CPU hotunplug
1070 */
1071static noinstr int mce_start(int *no_way_out)
1072{
1073	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1074	int order, ret = -1;
1075
1076	if (!timeout)
1077		return ret;
1078
1079	raw_atomic_add(*no_way_out, &global_nwo);
1080	/*
1081	 * Rely on the implied barrier below, such that global_nwo
1082	 * is updated before mce_callin.
1083	 */
1084	order = raw_atomic_inc_return(&mce_callin);
1085	arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
1086
1087	/* Enable instrumentation around calls to external facilities */
1088	instrumentation_begin();
1089
1090	/*
1091	 * Wait for everyone.
1092	 */
1093	while (raw_atomic_read(&mce_callin) != num_online_cpus()) {
1094		if (mce_timed_out(&timeout,
1095				  "Timeout: Not all CPUs entered broadcast exception handler")) {
1096			raw_atomic_set(&global_nwo, 0);
1097			goto out;
1098		}
1099		ndelay(SPINUNIT);
1100	}
1101
1102	/*
1103	 * mce_callin should be read before global_nwo
1104	 */
1105	smp_rmb();
1106
1107	if (order == 1) {
1108		/*
1109		 * Monarch: Starts executing now, the others wait.
1110		 */
1111		raw_atomic_set(&mce_executing, 1);
1112	} else {
1113		/*
1114		 * Subject: Now start the scanning loop one by one in
1115		 * the original callin order.
1116		 * This way when there are any shared banks it will be
1117		 * only seen by one CPU before cleared, avoiding duplicates.
1118		 */
1119		while (raw_atomic_read(&mce_executing) < order) {
1120			if (mce_timed_out(&timeout,
1121					  "Timeout: Subject CPUs unable to finish machine check processing")) {
1122				raw_atomic_set(&global_nwo, 0);
1123				goto out;
1124			}
1125			ndelay(SPINUNIT);
1126		}
1127	}
1128
1129	/*
1130	 * Cache the global no_way_out state.
1131	 */
1132	*no_way_out = raw_atomic_read(&global_nwo);
1133
1134	ret = order;
1135
1136out:
1137	instrumentation_end();
1138
1139	return ret;
1140}
1141
1142/*
1143 * Synchronize between CPUs after main scanning loop.
1144 * This invokes the bulk of the Monarch processing.
1145 */
1146static noinstr int mce_end(int order)
1147{
1148	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1149	int ret = -1;
1150
1151	/* Allow instrumentation around external facilities. */
1152	instrumentation_begin();
1153
1154	if (!timeout)
1155		goto reset;
1156	if (order < 0)
1157		goto reset;
1158
1159	/*
1160	 * Allow others to run.
1161	 */
1162	atomic_inc(&mce_executing);
1163
1164	if (order == 1) {
1165		/*
1166		 * Monarch: Wait for everyone to go through their scanning
1167		 * loops.
1168		 */
1169		while (atomic_read(&mce_executing) <= num_online_cpus()) {
1170			if (mce_timed_out(&timeout,
1171					  "Timeout: Monarch CPU unable to finish machine check processing"))
1172				goto reset;
1173			ndelay(SPINUNIT);
1174		}
1175
1176		mce_reign();
1177		barrier();
1178		ret = 0;
1179	} else {
1180		/*
1181		 * Subject: Wait for Monarch to finish.
1182		 */
1183		while (atomic_read(&mce_executing) != 0) {
1184			if (mce_timed_out(&timeout,
1185					  "Timeout: Monarch CPU did not finish machine check processing"))
1186				goto reset;
1187			ndelay(SPINUNIT);
1188		}
1189
1190		/*
1191		 * Don't reset anything. That's done by the Monarch.
1192		 */
1193		ret = 0;
1194		goto out;
1195	}
1196
1197	/*
1198	 * Reset all global state.
1199	 */
1200reset:
1201	atomic_set(&global_nwo, 0);
1202	atomic_set(&mce_callin, 0);
1203	cpumask_setall(&mce_missing_cpus);
1204	barrier();
1205
1206	/*
1207	 * Let others run again.
1208	 */
1209	atomic_set(&mce_executing, 0);
1210
1211out:
1212	instrumentation_end();
1213
1214	return ret;
1215}
1216
1217static __always_inline void mce_clear_state(unsigned long *toclear)
1218{
1219	int i;
1220
1221	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1222		if (arch_test_bit(i, toclear))
1223			mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
1224	}
1225}
1226
1227/*
1228 * Cases where we avoid rendezvous handler timeout:
1229 * 1) If this CPU is offline.
1230 *
1231 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1232 *  skip those CPUs which remain looping in the 1st kernel - see
1233 *  crash_nmi_callback().
1234 *
1235 * Note: there still is a small window between kexec-ing and the new,
1236 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1237 * might not get handled properly.
1238 */
1239static noinstr bool mce_check_crashing_cpu(void)
1240{
1241	unsigned int cpu = smp_processor_id();
1242
1243	if (arch_cpu_is_offline(cpu) ||
1244	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
1245		u64 mcgstatus;
1246
1247		mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
1248
1249		if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
1250			if (mcgstatus & MCG_STATUS_LMCES)
1251				return false;
1252		}
1253
1254		if (mcgstatus & MCG_STATUS_RIPV) {
1255			__wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
1256			return true;
1257		}
1258	}
1259	return false;
1260}
1261
1262static __always_inline int
1263__mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
1264		unsigned long *toclear, unsigned long *valid_banks, int no_way_out,
1265		int *worst)
1266{
1267	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1268	struct mca_config *cfg = &mca_cfg;
1269	int severity, i, taint = 0;
1270
1271	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1272		arch___clear_bit(i, toclear);
1273		if (!arch_test_bit(i, valid_banks))
1274			continue;
1275
1276		if (!mce_banks[i].ctl)
1277			continue;
1278
1279		m->misc = 0;
1280		m->addr = 0;
1281		m->bank = i;
1282
1283		m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
1284		if (!(m->status & MCI_STATUS_VAL))
1285			continue;
1286
1287		/*
1288		 * Corrected or non-signaled errors are handled by
1289		 * machine_check_poll(). Leave them alone, unless this panics.
1290		 */
1291		if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1292			!no_way_out)
1293			continue;
1294
1295		/* Set taint even when machine check was not enabled. */
1296		taint++;
1297
1298		severity = mce_severity(m, regs, NULL, true);
1299
1300		/*
1301		 * When machine check was for corrected/deferred handler don't
1302		 * touch, unless we're panicking.
1303		 */
1304		if ((severity == MCE_KEEP_SEVERITY ||
1305		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
1306			continue;
1307
1308		arch___set_bit(i, toclear);
1309
1310		/* Machine check event was not enabled. Clear, but ignore. */
1311		if (severity == MCE_NO_SEVERITY)
1312			continue;
1313
1314		mce_read_aux(m, i);
1315
1316		/* assuming valid severity level != 0 */
1317		m->severity = severity;
1318
1319		/*
1320		 * Enable instrumentation around the mce_log() call which is
1321		 * done in #MC context, where instrumentation is disabled.
1322		 */
1323		instrumentation_begin();
1324		mce_log(m);
1325		instrumentation_end();
1326
1327		if (severity > *worst) {
1328			*final = *m;
1329			*worst = severity;
1330		}
1331	}
1332
1333	/* mce_clear_state will clear *final, save locally for use later */
1334	*m = *final;
1335
1336	return taint;
1337}
1338
1339static void kill_me_now(struct callback_head *ch)
1340{
1341	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
1342
1343	p->mce_count = 0;
1344	force_sig(SIGBUS);
1345}
1346
1347static void kill_me_maybe(struct callback_head *cb)
1348{
1349	struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1350	int flags = MF_ACTION_REQUIRED;
1351	unsigned long pfn;
1352	int ret;
1353
1354	p->mce_count = 0;
1355	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
1356
1357	if (!p->mce_ripv)
1358		flags |= MF_MUST_KILL;
1359
1360	pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
1361	ret = memory_failure(pfn, flags);
1362	if (!ret) {
1363		set_mce_nospec(pfn);
1364		sync_core();
1365		return;
1366	}
1367
1368	/*
1369	 * -EHWPOISON from memory_failure() means that it already sent SIGBUS
1370	 * to the current process with the proper error info,
1371	 * -EOPNOTSUPP means hwpoison_filter() filtered the error event,
1372	 *
1373	 * In both cases, no further processing is required.
1374	 */
1375	if (ret == -EHWPOISON || ret == -EOPNOTSUPP)
1376		return;
1377
1378	pr_err("Memory error not recovered");
1379	kill_me_now(cb);
1380}
1381
1382static void kill_me_never(struct callback_head *cb)
1383{
1384	struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1385	unsigned long pfn;
1386
1387	p->mce_count = 0;
1388	pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr);
1389	pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
1390	if (!memory_failure(pfn, 0))
1391		set_mce_nospec(pfn);
1392}
1393
1394static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *))
1395{
1396	int count = ++current->mce_count;
1397
1398	/* First call, save all the details */
1399	if (count == 1) {
1400		current->mce_addr = m->addr;
1401		current->mce_kflags = m->kflags;
1402		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
1403		current->mce_whole_page = whole_page(m);
1404		current->mce_kill_me.func = func;
1405	}
1406
1407	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
1408	if (count > 10)
1409		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
1410
1411	/* Second or later call, make sure page address matches the one from first call */
1412	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
1413		mce_panic("Consecutive machine checks to different user pages", m, msg);
1414
1415	/* Do not call task_work_add() more than once */
1416	if (count > 1)
1417		return;
1418
1419	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
1420}
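
/*
 * Simplified sketch of how the #MC handler below hands recovery off to
 * task_work (conditions abbreviated):
 *
 *	if (user_mode(regs))
 *		queue_task_work(&m, msg, kill_me_maybe);
 *	else if (m.kflags & MCE_IN_KERNEL_COPYIN)
 *		queue_task_work(&m, msg, kill_me_never);
 */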
1421
1422/* Handle unconfigured int18 (should never happen) */
1423static noinstr void unexpected_machine_check(struct pt_regs *regs)
1424{
1425	instrumentation_begin();
1426	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1427	       smp_processor_id());
1428	instrumentation_end();
1429}
1430
1431/*
1432 * The actual machine check handler. This only handles real exceptions when
1433 * something got corrupted coming in through int 18.
1434 *
1435 * This is executed in #MC context not subject to normal locking rules.
1436 * This implies that most kernel services cannot be safely used. Don't even
1437 * think about putting a printk in there!
1438 *
1439 * On Intel systems this is entered on all CPUs in parallel through
1440 * MCE broadcast. However, some CPUs might be broken beyond repair,
1441 * so always be careful when synchronizing with others.
1442 *
1443 * Tracing and kprobes are disabled: if we interrupted a kernel context
1444 * with IF=1, we need to minimize stack usage.  There are also recursion
1445 * issues: if the machine check was due to a failure of the memory
1446 * backing the user stack, tracing that reads the user stack will cause
1447 * potentially infinite recursion.
1448 *
1449 * Currently, the #MC handler calls out to a number of external facilities
1450 * and, therefore, allows instrumentation around them. The optimal thing to
1451 * have would be to do the absolutely minimal work required in #MC context
1452 * and have instrumentation disabled only around that. Further processing can
1453 * then happen in process context where instrumentation is allowed. Achieving
1454 * that requires careful auditing and modifications. Until then, the code
1455 * allows instrumentation temporarily, where required.
1456 */
1457noinstr void do_machine_check(struct pt_regs *regs)
1458{
1459	int worst = 0, order, no_way_out, kill_current_task, lmce, taint = 0;
1460	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS) = { 0 };
1461	DECLARE_BITMAP(toclear, MAX_NR_BANKS) = { 0 };
1462	struct mce m, *final;
1463	char *msg = NULL;
1464
1465	if (unlikely(mce_flags.p5))
1466		return pentium_machine_check(regs);
1467	else if (unlikely(mce_flags.winchip))
1468		return winchip_machine_check(regs);
1469	else if (unlikely(!mca_cfg.initialized))
1470		return unexpected_machine_check(regs);
1471
1472	if (mce_flags.skx_repmov_quirk && quirk_skylake_repmov())
1473		goto clear;
1474
1475	/*
1476	 * Establish sequential order between the CPUs entering the machine
1477	 * check handler.
1478	 */
1479	order = -1;
1480
1481	/*
1482	 * If no_way_out gets set, there is no safe way to recover from this
1483	 * MCE.
1484	 */
1485	no_way_out = 0;
1486
1487	/*
1488	 * If kill_current_task is not set, there might be a way to recover from this
1489	 * error.
1490	 */
1491	kill_current_task = 0;
1492
1493	/*
1494	 * MCEs are always local on AMD. The same is determined by MCG_STATUS_LMCES
1495	 * on Intel.
1496	 */
1497	lmce = 1;
1498
1499	this_cpu_inc(mce_exception_count);
1500
1501	mce_gather_info(&m, regs);
1502	m.tsc = rdtsc();
1503
1504	final = this_cpu_ptr(&mces_seen);
1505	*final = m;
1506
1507	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1508
1509	barrier();
1510
1511	/*
1512	 * If there is no restart IP we may need to kill the task or panic.
1513	 * Assume the worst for now, but if we find the
1514	 * severity is MCE_AR_SEVERITY we have other options.
1515	 */
1516	if (!(m.mcgstatus & MCG_STATUS_RIPV))
1517		kill_current_task = 1;
1518	/*
1519	 * Check if this MCE is signaled to only this logical processor,
1520	 * on Intel and Zhaoxin only.
1521	 */
1522	if (m.cpuvendor == X86_VENDOR_INTEL ||
1523	    m.cpuvendor == X86_VENDOR_ZHAOXIN)
1524		lmce = m.mcgstatus & MCG_STATUS_LMCES;
1525
1526	/*
1527	 * Local machine check may already know that we have to panic.
1528	 * Broadcast machine check begins rendezvous in mce_start().
1529	 * Go through all banks in exclusion of the other CPUs. This way we
1530	 * don't report duplicated events on shared banks because the first one
1531	 * to see it will clear it.
1532	 */
1533	if (lmce) {
1534		if (no_way_out)
1535			mce_panic("Fatal local machine check", &m, msg);
1536	} else {
1537		order = mce_start(&no_way_out);
1538	}
1539
1540	taint = __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
1541
1542	if (!no_way_out)
1543		mce_clear_state(toclear);
1544
1545	/*
1546	 * Do most of the synchronization with other CPUs.
1547	 * When there's any problem use only local no_way_out state.
1548	 */
1549	if (!lmce) {
1550		if (mce_end(order) < 0) {
1551			if (!no_way_out)
1552				no_way_out = worst >= MCE_PANIC_SEVERITY;
1553
1554			if (no_way_out)
1555				mce_panic("Fatal machine check on current CPU", &m, msg);
1556		}
1557	} else {
1558		/*
1559		 * If there was a fatal machine check we should have
1560		 * already called mce_panic earlier in this function.
1561		 * Since we re-read the banks, we might have found
1562		 * something new. Check again to see if we found a
1563		 * fatal error. We call "mce_severity()" again to
1564		 * make sure we have the right "msg".
1565		 */
1566		if (worst >= MCE_PANIC_SEVERITY) {
1567			mce_severity(&m, regs, &msg, true);
1568			mce_panic("Local fatal machine check!", &m, msg);
1569		}
1570	}
1571
1572	/*
1573	 * Enable instrumentation around the external facilities like task_work_add()
1574	 * (via queue_task_work()), fixup_exception() etc. For now, that is. Fixing this
1575	 * properly would need a lot more involved reorganization.
1576	 */
1577	instrumentation_begin();
1578
1579	if (taint)
1580		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1581
1582	if (worst != MCE_AR_SEVERITY && !kill_current_task)
1583		goto out;
1584
1585	/* Fault was in user mode and we need to take some action */
1586	if ((m.cs & 3) == 3) {
1587		/* If this triggers there is no way to recover. Die hard. */
1588		BUG_ON(!on_thread_stack() || !user_mode(regs));
1589
1590		if (!mce_usable_address(&m))
1591			queue_task_work(&m, msg, kill_me_now);
1592		else
1593			queue_task_work(&m, msg, kill_me_maybe);
1594
1595	} else {
1596		/*
1597		 * Handle an MCE which has happened in kernel space but from
1598		 * which the kernel can recover: ex_has_fault_handler() has
1599		 * already verified that the rIP at which the error happened is
1600		 * a rIP from which the kernel can recover (by jumping to
1601		 * recovery code specified in _ASM_EXTABLE_FAULT()) and the
1602		 * corresponding exception handler which would do that is the
1603		 * proper one.
1604		 */
1605		if (m.kflags & MCE_IN_KERNEL_RECOV) {
1606			if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
1607				mce_panic("Failed kernel mode recovery", &m, msg);
1608		}
1609
1610		if (m.kflags & MCE_IN_KERNEL_COPYIN)
1611			queue_task_work(&m, msg, kill_me_never);
1612	}
1613
1614out:
1615	instrumentation_end();
1616
1617clear:
1618	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1619}
1620EXPORT_SYMBOL_GPL(do_machine_check);
1621
1622#ifndef CONFIG_MEMORY_FAILURE
1623int memory_failure(unsigned long pfn, int flags)
1624{
1625	/* mce_severity() should not hand us an ACTION_REQUIRED error */
1626	BUG_ON(flags & MF_ACTION_REQUIRED);
1627	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1628	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1629	       pfn);
1630
1631	return 0;
1632}
1633#endif
1634
1635/*
1636 * Periodic polling timer for "silent" machine check errors.  If the
1637 * poller finds an MCE, poll 2x faster.  When the poller finds no more
1638 * errors, poll 2x slower (up to check_interval seconds).
1639 */
1640static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1641
1642static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1643static DEFINE_PER_CPU(struct timer_list, mce_timer);
1644
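/*
 * (Re)arm the per-CPU polling timer, but never push an already pending
 * expiry further into the future.
 */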
1645static void __start_timer(struct timer_list *t, unsigned long interval)
1646{
1647	unsigned long when = jiffies + interval;
1648	unsigned long flags;
1649
1650	local_irq_save(flags);
1651
1652	if (!timer_pending(t) || time_before(when, t->expires))
1653		mod_timer(t, round_jiffies(when));
1654
1655	local_irq_restore(flags);
1656}
1657
1658static void mc_poll_banks_default(void)
1659{
1660	machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1661}
1662
1663void (*mc_poll_banks)(void) = mc_poll_banks_default;
1664
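/*
 * Periodic timer callback: poll the banks for corrected errors and adapt
 * the polling interval, unless a CMCI storm forces one-second polling.
 */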
1665static void mce_timer_fn(struct timer_list *t)
1666{
1667	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1668	unsigned long iv;
1669
1670	WARN_ON(cpu_t != t);
1671
1672	iv = __this_cpu_read(mce_next_interval);
1673
1674	if (mce_available(this_cpu_ptr(&cpu_info)))
1675		mc_poll_banks();
1676
1677	/*
1678	 * Alert userspace if needed. If we logged an MCE, reduce the polling
1679	 * interval, otherwise increase the polling interval.
1680	 */
1681	if (mce_notify_irq())
1682		iv = max(iv / 2, (unsigned long) HZ/100);
1683	else
1684		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1685
1686	if (mce_get_storm_mode()) {
1687		__start_timer(t, HZ);
1688	} else {
1689		__this_cpu_write(mce_next_interval, iv);
1690		__start_timer(t, iv);
1691	}
1692}
1693
1694/*
1695 * When a storm starts on any bank on this CPU, switch to polling
1696 * once per second. When the storm ends, revert to the default
1697 * polling interval.
1698 */
1699void mce_timer_kick(bool storm)
1700{
1701	struct timer_list *t = this_cpu_ptr(&mce_timer);
1702
1703	mce_set_storm_mode(storm);
1704
1705	if (storm)
1706		__start_timer(t, HZ);
1707	else
1708		__this_cpu_write(mce_next_interval, check_interval * HZ);
1709}
1710
1711/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1712static void mce_timer_delete_all(void)
1713{
1714	int cpu;
1715
1716	for_each_online_cpu(cpu)
1717		del_timer_sync(&per_cpu(mce_timer, cpu));
1718}
1719
1720/*
1721 * Notify the user(s) about new machine check events.
1722 * Can be called from interrupt context, but not from machine check/NMI
1723 * context.
1724 */
1725int mce_notify_irq(void)
1726{
1727	/* Not more than two messages every minute */
1728	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1729
1730	if (test_and_clear_bit(0, &mce_need_notify)) {
1731		mce_work_trigger();
1732
1733		if (__ratelimit(&ratelimit))
1734			pr_info(HW_ERR "Machine check events logged\n");
1735
1736		return 1;
1737	}
1738	return 0;
1739}
1740EXPORT_SYMBOL_GPL(mce_notify_irq);
1741
1742static void __mcheck_cpu_mce_banks_init(void)
1743{
1744	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1745	u8 n_banks = this_cpu_read(mce_num_banks);
1746	int i;
1747
1748	for (i = 0; i < n_banks; i++) {
1749		struct mce_bank *b = &mce_banks[i];
1750
1751		/*
1752		 * Init them all; __mcheck_cpu_apply_quirks() is going to apply
1753		 * the required vendor quirks before
1754		 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1755		 */
1756		b->ctl = -1ULL;
1757		b->init = true;
1758	}
1759}
1760
1761/*
1762 * Initialize Machine Checks for a CPU.
1763 */
1764static void __mcheck_cpu_cap_init(void)
1765{
1766	u64 cap;
1767	u8 b;
1768
1769	rdmsrl(MSR_IA32_MCG_CAP, cap);
1770
1771	b = cap & MCG_BANKCNT_MASK;
1772
1773	if (b > MAX_NR_BANKS) {
1774		pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1775			smp_processor_id(), MAX_NR_BANKS, b);
1776		b = MAX_NR_BANKS;
1777	}
1778
1779	this_cpu_write(mce_num_banks, b);
1780
1781	__mcheck_cpu_mce_banks_init();
1782
1783	/* Use accurate RIP reporting if available. */
1784	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1785		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1786
1787	if (cap & MCG_SER_P)
1788		mca_cfg.ser = 1;
1789}
1790
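/*
 * Log errors left over from a previous boot/reset, then enable machine
 * check exceptions by setting CR4.MCE and, if present, all MCG_CTL features.
 */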
1791static void __mcheck_cpu_init_generic(void)
1792{
1793	enum mcp_flags m_fl = 0;
1794	mce_banks_t all_banks;
1795	u64 cap;
1796
1797	if (!mca_cfg.bootlog)
1798		m_fl = MCP_DONTLOG;
1799
1800	/*
1801	 * Log the machine checks left over from the previous reset. Log them
1802	 * only, do not start processing them. That will happen in mcheck_late_init()
1803	 * when all consumers have been registered on the notifier chain.
1804	 */
1805	bitmap_fill(all_banks, MAX_NR_BANKS);
1806	machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
1807
1808	cr4_set_bits(X86_CR4_MCE);
1809
1810	rdmsrl(MSR_IA32_MCG_CAP, cap);
1811	if (cap & MCG_CTL_P)
1812		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1813}
1814
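/*
 * Program the per-bank control registers and clear any stale status in
 * every bank that is marked for initialization.
 */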
1815static void __mcheck_cpu_init_clear_banks(void)
1816{
1817	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1818	int i;
1819
1820	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1821		struct mce_bank *b = &mce_banks[i];
1822
1823		if (!b->init)
1824			continue;
1825		wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
1826		wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
1827	}
1828}
1829
1830/*
1831 * Do a final check to see if there are any unused/RAZ banks.
1832 *
1833 * This must be done after the banks have been initialized and any quirks have
1834 * been applied.
1835 *
1836 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1837 * Otherwise, a user who disables a bank will not be able to re-enable it
1838 * without a system reboot.
1839 */
1840static void __mcheck_cpu_check_banks(void)
1841{
1842	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1843	u64 msrval;
1844	int i;
1845
1846	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1847		struct mce_bank *b = &mce_banks[i];
1848
1849		if (!b->init)
1850			continue;
1851
1852		rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
1853		b->init = !!msrval;
1854	}
1855}
1856
1857/* Add per CPU specific workarounds here */
1858static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1859{
1860	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1861	struct mca_config *cfg = &mca_cfg;
1862
1863	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1864		pr_info("unknown CPU type - not enabling MCE support\n");
1865		return -EOPNOTSUPP;
1866	}
1867
1868	/* This should be disabled by the BIOS, but isn't always */
1869	if (c->x86_vendor == X86_VENDOR_AMD) {
1870		if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1871			/*
1872			 * disable GART TBL walk error reporting, which
1873			 * trips off incorrectly with the IOMMU & 3ware
1874			 * & Cerberus:
1875			 */
1876			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1877		}
1878		if (c->x86 < 0x11 && cfg->bootlog < 0) {
1879			/*
1880			 * Lots of broken BIOSes around that don't clear them
1881			 * by default and leave crap in there. Don't log:
1882			 */
1883			cfg->bootlog = 0;
1884		}
1885		/*
1886		 * Various K7s with broken bank 0 around. Always disable
1887		 * by default.
1888		 */
1889		if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1890			mce_banks[0].ctl = 0;
1891
1892		/*
1893		 * overflow_recov is supported for F15h Models 00h-0fh
1894		 * even though we don't have a CPUID bit for it.
1895		 */
1896		if (c->x86 == 0x15 && c->x86_model <= 0xf)
1897			mce_flags.overflow_recov = 1;
1898
1899		if (c->x86 >= 0x17 && c->x86 <= 0x1A)
1900			mce_flags.zen_ifu_quirk = 1;
1901
1902	}
1903
1904	if (c->x86_vendor == X86_VENDOR_INTEL) {
1905		/*
1906		 * The SDM documents that on family 6 bank 0 should not be written
1907		 * because it aliases to another special BIOS-controlled
1908		 * register.
1909		 * It is no longer aliased on model 0x1a+, though.
1910		 * Don't ignore bank 0 completely because there could be a
1911		 * valid event later; merely don't write CTL0.
1912		 */
1913
1914		if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1915			mce_banks[0].init = false;
1916
1917		/*
1918		 * All newer Intel systems support MCE broadcasting. Enable
1919		 * synchronization with a one second timeout.
1920		 */
1921		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1922			cfg->monarch_timeout < 0)
1923			cfg->monarch_timeout = USEC_PER_SEC;
1924
1925		/*
1926		 * There are also broken BIOSes on some Pentium M and
1927		 * earlier systems:
1928		 */
1929		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1930			cfg->bootlog = 0;
1931
1932		if (c->x86 == 6 && c->x86_model == 45)
1933			mce_flags.snb_ifu_quirk = 1;
1934
1935		/*
1936		 * Skylake, Cascade Lake and Cooper Lake require a quirk on
1937		 * rep movs.
1938		 */
1939		if (c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_X)
1940			mce_flags.skx_repmov_quirk = 1;
1941	}
1942
1943	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1944		/*
1945		 * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1946		 * synchronization with a one second timeout.
1947		 */
1948		if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1949			if (cfg->monarch_timeout < 0)
1950				cfg->monarch_timeout = USEC_PER_SEC;
1951		}
1952	}
1953
1954	if (cfg->monarch_timeout < 0)
1955		cfg->monarch_timeout = 0;
1956	if (cfg->bootlog != 0)
1957		cfg->panic_timeout = 30;
1958
1959	return 0;
1960}
1961
1962static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1963{
1964	if (c->x86 != 5)
1965		return 0;
1966
1967	switch (c->x86_vendor) {
1968	case X86_VENDOR_INTEL:
1969		intel_p5_mcheck_init(c);
1970		mce_flags.p5 = 1;
1971		return 1;
1972	case X86_VENDOR_CENTAUR:
1973		winchip_mcheck_init(c);
1974		mce_flags.winchip = 1;
1975		return 1;
1976	default:
1977		return 0;
1978	}
1979
1980	return 0;
1981}
1982
1983/*
1984 * Init basic CPU features needed for early decoding of MCEs.
1985 */
1986static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1987{
1988	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1989		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1990		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
1991		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
1992		mce_flags.amd_threshold	 = 1;
1993	}
1994}
1995
1996static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1997{
1998	struct mca_config *cfg = &mca_cfg;
1999
2000	/*
2001	 * All newer Centaur CPUs support MCE broadcasting. Enable
2002	 * synchronization with a one second timeout.
2003	 */
2004	if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
2005	     c->x86 > 6) {
2006		if (cfg->monarch_timeout < 0)
2007			cfg->monarch_timeout = USEC_PER_SEC;
2008	}
2009}
2010
2011static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
2012{
2013	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2014
2015	/*
2016	 * These CPUs have MCA bank 8 which reports only one error type called
2017	 * SVAD (System View Address Decoder). The reporting of that error is
2018	 * controlled by IA32_MC8.CTL.0.
2019	 *
2020	 * If enabled, prefetching on these CPUs will cause SVAD MCE when
2021	 * virtual machines start and result in a system panic. Always disable
2022	 * bank 8 SVAD error by default.
2023	 */
2024	if ((c->x86 == 7 && c->x86_model == 0x1b) ||
2025	    (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
2026		if (this_cpu_read(mce_num_banks) > 8)
2027			mce_banks[8].ctl = 0;
2028	}
2029
2030	intel_init_cmci();
2031	intel_init_lmce();
2032}
2033
2034static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
2035{
2036	intel_clear_lmce();
2037}
2038
2039static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
2040{
2041	switch (c->x86_vendor) {
2042	case X86_VENDOR_INTEL:
2043		mce_intel_feature_init(c);
2044		break;
2045
2046	case X86_VENDOR_AMD: {
2047		mce_amd_feature_init(c);
2048		break;
2049		}
2050
2051	case X86_VENDOR_HYGON:
2052		mce_hygon_feature_init(c);
2053		break;
2054
2055	case X86_VENDOR_CENTAUR:
2056		mce_centaur_feature_init(c);
2057		break;
2058
2059	case X86_VENDOR_ZHAOXIN:
2060		mce_zhaoxin_feature_init(c);
2061		break;
2062
2063	default:
2064		break;
2065	}
2066}
2067
2068static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
2069{
2070	switch (c->x86_vendor) {
2071	case X86_VENDOR_INTEL:
2072		mce_intel_feature_clear(c);
2073		break;
2074
2075	case X86_VENDOR_ZHAOXIN:
2076		mce_zhaoxin_feature_clear(c);
2077		break;
2078
2079	default:
2080		break;
2081	}
2082}
2083
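/*
 * Kick off periodic polling on this CPU, unless corrected error handling
 * is disabled or the check interval is zero.
 */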
2084static void mce_start_timer(struct timer_list *t)
2085{
2086	unsigned long iv = check_interval * HZ;
2087
2088	if (mca_cfg.ignore_ce || !iv)
2089		return;
2090
2091	this_cpu_write(mce_next_interval, iv);
2092	__start_timer(t, iv);
2093}
2094
2095static void __mcheck_cpu_setup_timer(void)
2096{
2097	struct timer_list *t = this_cpu_ptr(&mce_timer);
2098
2099	timer_setup(t, mce_timer_fn, TIMER_PINNED);
2100}
2101
2102static void __mcheck_cpu_init_timer(void)
2103{
2104	struct timer_list *t = this_cpu_ptr(&mce_timer);
2105
2106	timer_setup(t, mce_timer_fn, TIMER_PINNED);
2107	mce_start_timer(t);
2108}
2109
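/*
 * Give the vendor code a chance to drop a machine check record before it
 * is logged. Returns true if the record should be discarded.
 */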
2110bool filter_mce(struct mce *m)
2111{
2112	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
2113		return amd_filter_mce(m);
2114	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2115		return intel_filter_mce(m);
2116
2117	return false;
2118}
2119
2120static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
2121{
2122	irqentry_state_t irq_state;
2123
2124	WARN_ON_ONCE(user_mode(regs));
2125
2126	/*
2127	 * Only required when from kernel mode. See
2128	 * mce_check_crashing_cpu() for details.
2129	 */
2130	if (mca_cfg.initialized && mce_check_crashing_cpu())
2131		return;
2132
2133	irq_state = irqentry_nmi_enter(regs);
2134
2135	do_machine_check(regs);
2136
2137	irqentry_nmi_exit(regs, irq_state);
2138}
2139
2140static __always_inline void exc_machine_check_user(struct pt_regs *regs)
2141{
2142	irqentry_enter_from_user_mode(regs);
2143
2144	do_machine_check(regs);
2145
2146	irqentry_exit_to_user_mode(regs);
2147}
2148
2149#ifdef CONFIG_X86_64
2150/* MCE hit kernel mode */
2151DEFINE_IDTENTRY_MCE(exc_machine_check)
2152{
2153	unsigned long dr7;
2154
2155	dr7 = local_db_save();
2156	exc_machine_check_kernel(regs);
2157	local_db_restore(dr7);
2158}
2159
2160/* The user mode variant. */
2161DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
2162{
2163	unsigned long dr7;
2164
2165	dr7 = local_db_save();
2166	exc_machine_check_user(regs);
2167	local_db_restore(dr7);
2168}
2169#else
2170/* 32bit unified entry point */
2171DEFINE_IDTENTRY_RAW(exc_machine_check)
2172{
2173	unsigned long dr7;
2174
2175	dr7 = local_db_save();
2176	if (user_mode(regs))
2177		exc_machine_check_user(regs);
2178	else
2179		exc_machine_check_kernel(regs);
2180	local_db_restore(dr7);
2181}
2182#endif
2183
2184/*
2185 * Called for each booted CPU to set up machine checks.
2186 * Must be called with preempt off:
2187 */
2188void mcheck_cpu_init(struct cpuinfo_x86 *c)
2189{
2190	if (mca_cfg.disabled)
2191		return;
2192
2193	if (__mcheck_cpu_ancient_init(c))
2194		return;
2195
2196	if (!mce_available(c))
2197		return;
2198
2199	__mcheck_cpu_cap_init();
2200
2201	if (__mcheck_cpu_apply_quirks(c) < 0) {
2202		mca_cfg.disabled = 1;
2203		return;
2204	}
2205
2206	if (mce_gen_pool_init()) {
2207		mca_cfg.disabled = 1;
2208		pr_emerg("Couldn't allocate MCE records pool!\n");
2209		return;
2210	}
2211
2212	mca_cfg.initialized = 1;
2213
2214	__mcheck_cpu_init_early(c);
2215	__mcheck_cpu_init_generic();
2216	__mcheck_cpu_init_vendor(c);
2217	__mcheck_cpu_init_clear_banks();
2218	__mcheck_cpu_check_banks();
2219	__mcheck_cpu_setup_timer();
2220}
2221
2222/*
2223 * Called for each booted CPU to clear some machine checks opt-ins
2224 */
2225void mcheck_cpu_clear(struct cpuinfo_x86 *c)
2226{
2227	if (mca_cfg.disabled)
2228		return;
2229
2230	if (!mce_available(c))
2231		return;
2232
2233	/*
2234	 * Possibly to clear general settings generic to x86
2235	 * __mcheck_cpu_clear_generic(c);
2236	 */
2237	__mcheck_cpu_clear_vendor(c);
2238
2239}
2240
2241static void __mce_disable_bank(void *arg)
2242{
2243	int bank = *((int *)arg);
2244	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2245	cmci_disable_bank(bank);
2246}
2247
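/*
 * Stop reporting corrected errors from the given bank: remove it from the
 * poll set and disable CMCI for it on all CPUs.
 */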
2248void mce_disable_bank(int bank)
2249{
2250	if (bank >= this_cpu_read(mce_num_banks)) {
2251		pr_warn(FW_BUG
2252			"Ignoring request to disable invalid MCA bank %d.\n",
2253			bank);
2254		return;
2255	}
2256	set_bit(bank, mce_banks_ce_disabled);
2257	on_each_cpu(__mce_disable_bank, &bank, 1);
2258}
2259
2260/*
2261 * mce=off Disables machine check
2262 * mce=no_cmci Disables CMCI
2263 * mce=no_lmce Disables LMCE
2264 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2265 * mce=print_all Print all machine check logs to console
2266 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2267 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2268 *	monarchtimeout is how long to wait for other CPUs on machine
2269 *	check, or 0 to not wait
2270 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
2271 *	and older.
2272 * mce=nobootlog Don't log MCEs from before booting.
2273 * mce=bios_cmci_threshold Don't program the CMCI threshold
2274 * mce=recovery force enable copy_mc_fragile()
2275 */
2276static int __init mcheck_enable(char *str)
2277{
2278	struct mca_config *cfg = &mca_cfg;
2279
2280	if (*str == 0) {
2281		enable_p5_mce();
2282		return 1;
2283	}
2284	if (*str == '=')
2285		str++;
2286	if (!strcmp(str, "off"))
2287		cfg->disabled = 1;
2288	else if (!strcmp(str, "no_cmci"))
2289		cfg->cmci_disabled = true;
2290	else if (!strcmp(str, "no_lmce"))
2291		cfg->lmce_disabled = 1;
2292	else if (!strcmp(str, "dont_log_ce"))
2293		cfg->dont_log_ce = true;
2294	else if (!strcmp(str, "print_all"))
2295		cfg->print_all = true;
2296	else if (!strcmp(str, "ignore_ce"))
2297		cfg->ignore_ce = true;
2298	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2299		cfg->bootlog = (str[0] == 'b');
2300	else if (!strcmp(str, "bios_cmci_threshold"))
2301		cfg->bios_cmci_threshold = 1;
2302	else if (!strcmp(str, "recovery"))
2303		cfg->recovery = 1;
2304	else if (isdigit(str[0]))
2305		get_option(&str, &(cfg->monarch_timeout));
2306	else {
2307		pr_info("mce argument %s ignored. Please use /sys\n", str);
2308		return 0;
2309	}
2310	return 1;
2311}
2312__setup("mce", mcheck_enable);
2313
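/*
 * Early machine check initialization: register the built-in decoder
 * notifiers and set up the deferred work used to process logged errors.
 */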
2314int __init mcheck_init(void)
2315{
2316	mce_register_decode_chain(&early_nb);
2317	mce_register_decode_chain(&mce_uc_nb);
2318	mce_register_decode_chain(&mce_default_nb);
2319
2320	INIT_WORK(&mce_work, mce_gen_pool_process);
2321	init_irq_work(&mce_irq_work, mce_irq_work_cb);
2322
2323	return 0;
2324}
2325
2326/*
2327 * mce_syscore: PM support
2328 */
2329
2330/*
2331 * Disable machine checks on suspend and shutdown. We can't really handle
2332 * them later.
2333 */
2334static void mce_disable_error_reporting(void)
2335{
2336	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2337	int i;
2338
2339	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2340		struct mce_bank *b = &mce_banks[i];
2341
2342		if (b->init)
2343			wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
2344	}
2345	return;
2346}
2347
2348static void vendor_disable_error_reporting(void)
2349{
2350	/*
2351	 * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
2352	 * MSRs are socket-wide. Disabling them for just a single offlined CPU
2353	 * is bad, since it will inhibit reporting for all shared resources on
2354	 * the socket like the last level cache (LLC), the integrated memory
2355	 * controller (iMC), etc.
2356	 */
2357	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2358	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2359	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2360	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2361		return;
2362
2363	mce_disable_error_reporting();
2364}
2365
2366static int mce_syscore_suspend(void)
2367{
2368	vendor_disable_error_reporting();
2369	return 0;
2370}
2371
2372static void mce_syscore_shutdown(void)
2373{
2374	vendor_disable_error_reporting();
2375}
2376
2377/*
2378 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2379 * Only one CPU is active at this time, the others get re-added later using
2380 * CPU hotplug:
2381 */
2382static void mce_syscore_resume(void)
2383{
2384	__mcheck_cpu_init_generic();
2385	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2386	__mcheck_cpu_init_clear_banks();
2387}
2388
2389static struct syscore_ops mce_syscore_ops = {
2390	.suspend	= mce_syscore_suspend,
2391	.shutdown	= mce_syscore_shutdown,
2392	.resume		= mce_syscore_resume,
2393};
2394
2395/*
2396 * mce_device: Sysfs support
2397 */
2398
2399static void mce_cpu_restart(void *data)
2400{
2401	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2402		return;
2403	__mcheck_cpu_init_generic();
2404	__mcheck_cpu_init_clear_banks();
2405	__mcheck_cpu_init_timer();
2406}
2407
2408/* Reinit MCEs after user configuration changes */
2409static void mce_restart(void)
2410{
2411	mce_timer_delete_all();
2412	on_each_cpu(mce_cpu_restart, NULL, 1);
2413	mce_schedule_work();
2414}
2415
2416/* Toggle features for corrected errors */
2417static void mce_disable_cmci(void *data)
2418{
2419	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2420		return;
2421	cmci_clear();
2422}
2423
2424static void mce_enable_ce(void *all)
2425{
2426	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2427		return;
2428	cmci_reenable();
2429	cmci_recheck();
2430	if (all)
2431		__mcheck_cpu_init_timer();
2432}
2433
2434static struct bus_type mce_subsys = {
2435	.name		= "machinecheck",
2436	.dev_name	= "machinecheck",
2437};
2438
2439DEFINE_PER_CPU(struct device *, mce_device);
2440
2441static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2442{
2443	return container_of(attr, struct mce_bank_dev, attr);
2444}
2445
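/* sysfs show/store handlers for the per-bank MCi_CTL control value. */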
2446static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2447			 char *buf)
2448{
2449	u8 bank = attr_to_bank(attr)->bank;
2450	struct mce_bank *b;
2451
2452	if (bank >= per_cpu(mce_num_banks, s->id))
2453		return -EINVAL;
2454
2455	b = &per_cpu(mce_banks_array, s->id)[bank];
2456
2457	if (!b->init)
2458		return -ENODEV;
2459
2460	return sprintf(buf, "%llx\n", b->ctl);
2461}
2462
2463static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2464			const char *buf, size_t size)
2465{
2466	u8 bank = attr_to_bank(attr)->bank;
2467	struct mce_bank *b;
2468	u64 new;
2469
2470	if (kstrtou64(buf, 0, &new) < 0)
2471		return -EINVAL;
2472
2473	if (bank >= per_cpu(mce_num_banks, s->id))
2474		return -EINVAL;
2475
2476	b = &per_cpu(mce_banks_array, s->id)[bank];
2477
2478	if (!b->init)
2479		return -ENODEV;
2480
2481	b->ctl = new;
2482	mce_restart();
2483
2484	return size;
2485}
2486
2487static ssize_t set_ignore_ce(struct device *s,
2488			     struct device_attribute *attr,
2489			     const char *buf, size_t size)
2490{
2491	u64 new;
2492
2493	if (kstrtou64(buf, 0, &new) < 0)
2494		return -EINVAL;
2495
2496	mutex_lock(&mce_sysfs_mutex);
2497	if (mca_cfg.ignore_ce ^ !!new) {
2498		if (new) {
2499			/* disable ce features */
2500			mce_timer_delete_all();
2501			on_each_cpu(mce_disable_cmci, NULL, 1);
2502			mca_cfg.ignore_ce = true;
2503		} else {
2504			/* enable ce features */
2505			mca_cfg.ignore_ce = false;
2506			on_each_cpu(mce_enable_ce, (void *)1, 1);
2507		}
2508	}
2509	mutex_unlock(&mce_sysfs_mutex);
2510
2511	return size;
2512}
2513
2514static ssize_t set_cmci_disabled(struct device *s,
2515				 struct device_attribute *attr,
2516				 const char *buf, size_t size)
2517{
2518	u64 new;
2519
2520	if (kstrtou64(buf, 0, &new) < 0)
2521		return -EINVAL;
2522
2523	mutex_lock(&mce_sysfs_mutex);
2524	if (mca_cfg.cmci_disabled ^ !!new) {
2525		if (new) {
2526			/* disable cmci */
2527			on_each_cpu(mce_disable_cmci, NULL, 1);
2528			mca_cfg.cmci_disabled = true;
2529		} else {
2530			/* enable cmci */
2531			mca_cfg.cmci_disabled = false;
2532			on_each_cpu(mce_enable_ce, NULL, 1);
2533		}
2534	}
2535	mutex_unlock(&mce_sysfs_mutex);
2536
2537	return size;
2538}
2539
2540static ssize_t store_int_with_restart(struct device *s,
2541				      struct device_attribute *attr,
2542				      const char *buf, size_t size)
2543{
2544	unsigned long old_check_interval = check_interval;
2545	ssize_t ret = device_store_ulong(s, attr, buf, size);
2546
2547	if (check_interval == old_check_interval)
2548		return ret;
2549
2550	mutex_lock(&mce_sysfs_mutex);
2551	mce_restart();
2552	mutex_unlock(&mce_sysfs_mutex);
2553
2554	return ret;
2555}
2556
2557static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2558static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2559static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
2560
2561static struct dev_ext_attribute dev_attr_check_interval = {
2562	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2563	&check_interval
2564};
2565
2566static struct dev_ext_attribute dev_attr_ignore_ce = {
2567	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2568	&mca_cfg.ignore_ce
2569};
2570
2571static struct dev_ext_attribute dev_attr_cmci_disabled = {
2572	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2573	&mca_cfg.cmci_disabled
2574};
2575
2576static struct device_attribute *mce_device_attrs[] = {
2577	&dev_attr_check_interval.attr,
2578#ifdef CONFIG_X86_MCELOG_LEGACY
2579	&dev_attr_trigger,
2580#endif
2581	&dev_attr_monarch_timeout.attr,
2582	&dev_attr_dont_log_ce.attr,
2583	&dev_attr_print_all.attr,
2584	&dev_attr_ignore_ce.attr,
2585	&dev_attr_cmci_disabled.attr,
2586	NULL
2587};
2588
2589static cpumask_var_t mce_device_initialized;
2590
2591static void mce_device_release(struct device *dev)
2592{
2593	kfree(dev);
2594}
2595
2596/* Per CPU device init. All of the CPUs still share the same bank device: */
2597static int mce_device_create(unsigned int cpu)
2598{
2599	struct device *dev;
2600	int err;
2601	int i, j;
2602
2603	dev = per_cpu(mce_device, cpu);
2604	if (dev)
2605		return 0;
2606
2607	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2608	if (!dev)
2609		return -ENOMEM;
2610	dev->id  = cpu;
2611	dev->bus = &mce_subsys;
2612	dev->release = &mce_device_release;
2613
2614	err = device_register(dev);
2615	if (err) {
2616		put_device(dev);
2617		return err;
2618	}
2619
2620	for (i = 0; mce_device_attrs[i]; i++) {
2621		err = device_create_file(dev, mce_device_attrs[i]);
2622		if (err)
2623			goto error;
2624	}
2625	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2626		err = device_create_file(dev, &mce_bank_devs[j].attr);
2627		if (err)
2628			goto error2;
2629	}
2630	cpumask_set_cpu(cpu, mce_device_initialized);
2631	per_cpu(mce_device, cpu) = dev;
2632
2633	return 0;
2634error2:
2635	while (--j >= 0)
2636		device_remove_file(dev, &mce_bank_devs[j].attr);
2637error:
2638	while (--i >= 0)
2639		device_remove_file(dev, mce_device_attrs[i]);
2640
2641	device_unregister(dev);
2642
2643	return err;
2644}
2645
2646static void mce_device_remove(unsigned int cpu)
2647{
2648	struct device *dev = per_cpu(mce_device, cpu);
2649	int i;
2650
2651	if (!cpumask_test_cpu(cpu, mce_device_initialized))
2652		return;
2653
2654	for (i = 0; mce_device_attrs[i]; i++)
2655		device_remove_file(dev, mce_device_attrs[i]);
2656
2657	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2658		device_remove_file(dev, &mce_bank_devs[i].attr);
2659
2660	device_unregister(dev);
2661	cpumask_clear_cpu(cpu, mce_device_initialized);
2662	per_cpu(mce_device, cpu) = NULL;
2663}
2664
2665/* Make sure there are no machine checks on offlined CPUs. */
2666static void mce_disable_cpu(void)
2667{
2668	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2669		return;
2670
2671	if (!cpuhp_tasks_frozen)
2672		cmci_clear();
2673
2674	vendor_disable_error_reporting();
2675}
2676
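/*
 * Re-enable CMCI (unless in the suspend/resume path) and restore the
 * per-bank control values when a CPU comes back online.
 */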
2677static void mce_reenable_cpu(void)
2678{
2679	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2680	int i;
2681
2682	if (!mce_available(raw_cpu_ptr(&cpu_info)))
2683		return;
2684
2685	if (!cpuhp_tasks_frozen)
2686		cmci_reenable();
2687	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2688		struct mce_bank *b = &mce_banks[i];
2689
2690		if (b->init)
2691			wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
2692	}
2693}
2694
2695static int mce_cpu_dead(unsigned int cpu)
2696{
2697	/* intentionally ignoring frozen here */
2698	if (!cpuhp_tasks_frozen)
2699		cmci_rediscover();
2700	return 0;
2701}
2702
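/*
 * CPU hotplug "online" callback: create the sysfs and threshold devices,
 * re-enable the banks and start the polling timer on this CPU.
 */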
2703static int mce_cpu_online(unsigned int cpu)
2704{
2705	struct timer_list *t = this_cpu_ptr(&mce_timer);
2706	int ret;
2707
2708	mce_device_create(cpu);
2709
2710	ret = mce_threshold_create_device(cpu);
2711	if (ret) {
2712		mce_device_remove(cpu);
2713		return ret;
2714	}
2715	mce_reenable_cpu();
2716	mce_start_timer(t);
2717	return 0;
2718}
2719
2720static int mce_cpu_pre_down(unsigned int cpu)
2721{
2722	struct timer_list *t = this_cpu_ptr(&mce_timer);
2723
2724	mce_disable_cpu();
2725	del_timer_sync(t);
2726	mce_threshold_remove_device(cpu);
2727	mce_device_remove(cpu);
2728	return 0;
2729}
2730
2731static __init void mce_init_banks(void)
2732{
2733	int i;
2734
2735	for (i = 0; i < MAX_NR_BANKS; i++) {
2736		struct mce_bank_dev *b = &mce_bank_devs[i];
2737		struct device_attribute *a = &b->attr;
2738
2739		b->bank = i;
2740
2741		sysfs_attr_init(&a->attr);
2742		a->attr.name	= b->attrname;
2743		snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2744
2745		a->attr.mode	= 0644;
2746		a->show		= show_bank;
2747		a->store	= set_bank;
2748	}
2749}
2750
2751/*
2752 * When running on XEN, this initcall is ordered against the XEN mcelog
2753 * initcall:
2754 *
2755 *   device_initcall(xen_late_init_mcelog);
2756 *   device_initcall_sync(mcheck_init_device);
2757 */
2758static __init int mcheck_init_device(void)
2759{
2760	int err;
2761
2762	/*
2763	 * Check if we have a spare virtual bit. This will only become
2764	 * a problem if/when we move beyond 5-level page tables.
2765	 */
2766	MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2767
2768	if (!mce_available(&boot_cpu_data)) {
2769		err = -EIO;
2770		goto err_out;
2771	}
2772
2773	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2774		err = -ENOMEM;
2775		goto err_out;
2776	}
2777
2778	mce_init_banks();
2779
2780	err = subsys_system_register(&mce_subsys, NULL);
2781	if (err)
2782		goto err_out_mem;
2783
2784	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2785				mce_cpu_dead);
2786	if (err)
2787		goto err_out_mem;
2788
2789	/*
2790	 * Invokes mce_cpu_online() on all CPUs which are online when
2791	 * the state is installed.
2792	 */
2793	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2794				mce_cpu_online, mce_cpu_pre_down);
2795	if (err < 0)
2796		goto err_out_online;
2797
2798	register_syscore_ops(&mce_syscore_ops);
2799
2800	return 0;
2801
2802err_out_online:
2803	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2804
2805err_out_mem:
2806	free_cpumask_var(mce_device_initialized);
2807
2808err_out:
2809	pr_err("Unable to init MCE device (rc: %d)\n", err);
2810
2811	return err;
2812}
2813device_initcall_sync(mcheck_init_device);
2814
2815/*
2816 * Old style boot options parsing. Only for compatibility.
2817 */
2818static int __init mcheck_disable(char *str)
2819{
2820	mca_cfg.disabled = 1;
2821	return 1;
2822}
2823__setup("nomce", mcheck_disable);
2824
2825#ifdef CONFIG_DEBUG_FS
2826struct dentry *mce_get_debugfs_dir(void)
2827{
2828	static struct dentry *dmce;
2829
2830	if (!dmce)
2831		dmce = debugfs_create_dir("mce", NULL);
2832
2833	return dmce;
2834}
2835
2836static void mce_reset(void)
2837{
2838	atomic_set(&mce_fake_panicked, 0);
2839	atomic_set(&mce_executing, 0);
2840	atomic_set(&mce_callin, 0);
2841	atomic_set(&global_nwo, 0);
2842	cpumask_setall(&mce_missing_cpus);
2843}
2844
2845static int fake_panic_get(void *data, u64 *val)
2846{
2847	*val = fake_panic;
2848	return 0;
2849}
2850
2851static int fake_panic_set(void *data, u64 val)
2852{
2853	mce_reset();
2854	fake_panic = val;
2855	return 0;
2856}
2857
2858DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2859			 "%llu\n");
2860
2861static void __init mcheck_debugfs_init(void)
2862{
2863	struct dentry *dmce;
2864
2865	dmce = mce_get_debugfs_dir();
2866	debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2867				   &fake_panic_fops);
2868}
2869#else
2870static void __init mcheck_debugfs_init(void) { }
2871#endif
2872
2873static int __init mcheck_late_init(void)
2874{
2875	if (mca_cfg.recovery)
2876		enable_copy_mc_fragile();
2877
2878	mcheck_debugfs_init();
2879
2880	/*
2881	 * Flush out everything that has been logged during early boot, now that
2882	 * everything has been initialized (workqueues, decoders, ...).
2883	 */
2884	mce_schedule_work();
2885
2886	return 0;
2887}
2888late_initcall(mcheck_late_init);