Linux v6.13.7: drivers/acpi/apei/ghes.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * APEI Generic Hardware Error Source support
   4 *
   5 * Generic Hardware Error Source provides a way to report platform
   6 * hardware errors (such as those from the chipset). It works in the
   7 * so-called "Firmware First" mode: hardware errors are reported to
   8 * the firmware first, and the firmware then reports them to Linux.
   9 * This way, some non-standard hardware error registers or
  10 * non-standard hardware links can be checked by the firmware to
  11 * produce more hardware error information for Linux.
  12 *
  13 * For more information about Generic Hardware Error Source, please
  14 * refer to ACPI Specification version 4.0, section 17.3.2.6
  15 *
  16 * Copyright 2010,2011 Intel Corp.
  17 *   Author: Huang Ying <ying.huang@intel.com>
  18 */
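/*
 * Editor's note: a minimal sketch (illustration only, callback names
 * hypothetical) of a consumer of this "Firmware First" path.
 * ghes_do_proc() below pushes every CPER memory error section through
 * the exported ghes_report_chain notifier, so another module could
 * observe the reports like this; the block is kept out of the build
 * with #if 0:
 */
#if 0
#include <linux/cper.h>
#include <linux/notifier.h>
#include <acpi/ghes.h>

static int example_mem_err_cb(struct notifier_block *nb,
			      unsigned long sev, void *data)
{
	struct cper_sec_mem_err *mem_err = data;

	if (mem_err->validation_bits & CPER_MEM_VALID_PA)
		pr_info("GHES memory error at %#llx (severity %lu)\n",
			mem_err->physical_addr, sev);
	return NOTIFY_OK;
}

static struct notifier_block example_mem_err_nb = {
	.notifier_call = example_mem_err_cb,
};

/* From the consumer's init path: */
/* ghes_register_report_chain(&example_mem_err_nb); */
#endif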
  19
  20#include <linux/arm_sdei.h>
  21#include <linux/kernel.h>
  22#include <linux/moduleparam.h>
  23#include <linux/init.h>
  24#include <linux/acpi.h>
  25#include <linux/io.h>
  26#include <linux/interrupt.h>
  27#include <linux/timer.h>
  28#include <linux/cper.h>
  29#include <linux/cleanup.h>
  30#include <linux/platform_device.h>
  31#include <linux/mutex.h>
  32#include <linux/ratelimit.h>
  33#include <linux/vmalloc.h>
  34#include <linux/irq_work.h>
  35#include <linux/llist.h>
  36#include <linux/genalloc.h>
  37#include <linux/kfifo.h>
  38#include <linux/pci.h>
  39#include <linux/pfn.h>
  40#include <linux/aer.h>
  41#include <linux/nmi.h>
  42#include <linux/sched/clock.h>
  43#include <linux/uuid.h>
  44#include <linux/ras.h>
  45#include <linux/task_work.h>
  46
  47#include <acpi/actbl1.h>
  48#include <acpi/ghes.h>
  49#include <acpi/apei.h>
  50#include <asm/fixmap.h>
  51#include <asm/tlbflush.h>
  52#include <cxl/event.h>
  53#include <ras/ras_event.h>
  54
  55#include "apei-internal.h"
  56
  57#define GHES_PFX	"GHES: "
  58
  59#define GHES_ESTATUS_MAX_SIZE		65536
  60#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
  61
  62#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
  63
  64/* This is just an estimate for memory pool allocation */
  65#define GHES_ESTATUS_CACHE_AVG_SIZE	512
  66
  67#define GHES_ESTATUS_CACHES_SIZE	4
  68
  69#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
  70/* Prevent too many caches from being allocated because of RCU */
  71#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
  72
  73#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
  74	(sizeof(struct ghes_estatus_cache) + (estatus_len))
  75#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
  76	((struct acpi_hest_generic_status *)				\
  77	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
  78
  79#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
  80	(sizeof(struct ghes_estatus_node) + (estatus_len))
  81#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
  82	((struct acpi_hest_generic_status *)				\
  83	 ((struct ghes_estatus_node *)(estatus_node) + 1))
  84
  85#define GHES_VENDOR_ENTRY_LEN(gdata_len)                               \
  86	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
  87#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)                     \
  88	((struct acpi_hest_generic_data *)                              \
  89	((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
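/*
 * Editor's note: the three *_FROM_* helpers above all decode the same
 * single-allocation layout, where the variable-size payload is placed
 * immediately after its fixed-size header:
 *
 *   +----------------------------+--------------------------------+
 *   | header (cache/node/entry)  | estatus or gdata payload       |
 *   +----------------------------+--------------------------------+
 *   ^ allocation start            ^ address returned by the helper
 */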
  90
  91/*
  92 *  NMI-like notifications vary by architecture; before the compiler can
  93 *  prune unused static functions, it needs a value for these enums.
  94 */
  95#ifndef CONFIG_ARM_SDE_INTERFACE
  96#define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
  97#define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
  98#endif
  99
 100static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);
 101
 102static inline bool is_hest_type_generic_v2(struct ghes *ghes)
 103{
 104	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
 105}
 106
 107/*
 108 * A platform may describe one error source for the handling of synchronous
 109 * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
 110 * or External Interrupt). On x86, the HEST notifications are always
 111 * asynchronous, so only SEA on ARM is delivered as a synchronous
 112 * notification.
 113 */
 114static inline bool is_hest_sync_notify(struct ghes *ghes)
 115{
 116	u8 notify_type = ghes->generic->notify.type;
 117
 118	return notify_type == ACPI_HEST_NOTIFY_SEA;
 119}
 120
 121/*
 122 * This driver isn't really modular; however, for the time being,
 123 * continuing to use module_param is the easiest way to remain
 124 * compatible with existing boot arg use cases.
 125 */
 126bool ghes_disable;
 127module_param_named(disable, ghes_disable, bool, 0);
 128
 129/*
 130 * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
 131 * check.
 132 */
 133static bool ghes_edac_force_enable;
 134module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
 135
 136/*
 137 * All error sources notified with HED (Hardware Error Device) share a
 138 * single notifier callback, so they need to be linked and checked one
 139 * by one. This holds true for NMI too.
 140 *
 141 * RCU is used for these lists, so ghes_list_mutex is only used for
 142 * list changing, not for traversing.
 143 */
 144static LIST_HEAD(ghes_hed);
 145static DEFINE_MUTEX(ghes_list_mutex);
 146
 147/*
 148 * A list of GHES devices which are given to the corresponding EDAC driver
 149 * ghes_edac for further use.
 150 */
 151static LIST_HEAD(ghes_devs);
 152static DEFINE_MUTEX(ghes_devs_mutex);
 153
 154/*
 155 * The memory area used to transfer hardware error information from
 156 * the BIOS to Linux can be determined only in the NMI, IRQ or timer
 157 * handler, and the generic ioremap cannot be used in atomic context,
 158 * so the fixmap is used instead.
 159 *
 160 * This spinlock is used to prevent the fixmap entry from being used
 161 * simultaneously.
 162 */
 163static DEFINE_SPINLOCK(ghes_notify_lock_irq);
 164
 165struct ghes_vendor_record_entry {
 166	struct work_struct work;
 167	int error_severity;
 168	char vendor_record[];
 169};
 170
 171static struct gen_pool *ghes_estatus_pool;
 172
 173static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
 174static atomic_t ghes_estatus_cache_alloced;
 175
 176static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
 177{
 178	phys_addr_t paddr;
 179	pgprot_t prot;
 180
 181	paddr = PFN_PHYS(pfn);
 182	prot = arch_apei_get_mem_attribute(paddr);
 183	__set_fixmap(fixmap_idx, paddr, prot);
 184
 185	return (void __iomem *) __fix_to_virt(fixmap_idx);
 186}
 187
 188static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
 189{
 190	int _idx = virt_to_fix((unsigned long)vaddr);
 191
 192	WARN_ON_ONCE(fixmap_idx != _idx);
 193	clear_fixmap(fixmap_idx);
 194}
 195
 196int ghes_estatus_pool_init(unsigned int num_ghes)
 197{
 198	unsigned long addr, len;
 199	int rc;
 200
 201	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
 202	if (!ghes_estatus_pool)
 203		return -ENOMEM;
 204
 205	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
 206	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
 207
 208	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
 209	if (!addr)
 210		goto err_pool_alloc;
 211
 212	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
 213	if (rc)
 214		goto err_pool_add;
 215
 216	return 0;
 217
 218err_pool_add:
 219	vfree((void *)addr);
 220
 221err_pool_alloc:
 222	gen_pool_destroy(ghes_estatus_pool);
 223
 224	return -ENOMEM;
 225}
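/*
 * Editor's note: gen_pool allocations are lockless, which is why this
 * pool (sized above as one worst-case 64KiB block per error source
 * plus room for the printk throttle caches) can back the NMI-like
 * paths below, where regular allocators must not be called.
 */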
 226
 227/**
 228 * ghes_estatus_pool_region_free - free previously allocated memory
 229 *				   from the ghes_estatus_pool.
 230 * @addr: address of memory to free.
 231 * @size: size of memory to free.
 232 *
 233 * Returns none.
 234 */
 235void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
 236{
 237	gen_pool_free(ghes_estatus_pool, addr, size);
 238}
 239EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
 240
 241static int map_gen_v2(struct ghes *ghes)
 242{
 243	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
 244}
 245
 246static void unmap_gen_v2(struct ghes *ghes)
 247{
 248	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
 249}
 250
 251static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
 252{
 253	int rc;
 254	u64 val = 0;
 255
 256	rc = apei_read(&val, &gv2->read_ack_register);
 257	if (rc)
 258		return;
 259
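	/*
	 * Editor's note: per the ACPI spec's GHESv2 Read Ack mechanism,
	 * Read Ack Preserve masks the register bits to keep and Read
	 * Ack Write supplies the bits to set; writing the result back
	 * tells the firmware that the error status block has been
	 * consumed and may be reused.
	 */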
 260	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
 261	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;
 262
 263	apei_write(val, &gv2->read_ack_register);
 264}
 265
 266static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 267{
 268	struct ghes *ghes;
 269	unsigned int error_block_length;
 270	int rc;
 271
 272	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
 273	if (!ghes)
 274		return ERR_PTR(-ENOMEM);
 275
 276	ghes->generic = generic;
 277	if (is_hest_type_generic_v2(ghes)) {
 278		rc = map_gen_v2(ghes);
 279		if (rc)
 280			goto err_free;
 281	}
 282
 283	rc = apei_map_generic_address(&generic->error_status_address);
 284	if (rc)
 285		goto err_unmap_read_ack_addr;
 286	error_block_length = generic->error_block_length;
 287	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
 288		pr_warn(FW_WARN GHES_PFX
 289			"Error status block length is too long: %u for "
 290			"generic hardware error source: %d.\n",
 291			error_block_length, generic->header.source_id);
 292		error_block_length = GHES_ESTATUS_MAX_SIZE;
 293	}
 294	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
 295	if (!ghes->estatus) {
 296		rc = -ENOMEM;
 297		goto err_unmap_status_addr;
 298	}
 299
 300	return ghes;
 301
 302err_unmap_status_addr:
 303	apei_unmap_generic_address(&generic->error_status_address);
 304err_unmap_read_ack_addr:
 305	if (is_hest_type_generic_v2(ghes))
 306		unmap_gen_v2(ghes);
 307err_free:
 308	kfree(ghes);
 309	return ERR_PTR(rc);
 310}
 311
 312static void ghes_fini(struct ghes *ghes)
 313{
 314	kfree(ghes->estatus);
 315	apei_unmap_generic_address(&ghes->generic->error_status_address);
 316	if (is_hest_type_generic_v2(ghes))
 317		unmap_gen_v2(ghes);
 318}
 319
 320static inline int ghes_severity(int severity)
 321{
 322	switch (severity) {
 323	case CPER_SEV_INFORMATIONAL:
 324		return GHES_SEV_NO;
 325	case CPER_SEV_CORRECTED:
 326		return GHES_SEV_CORRECTED;
 327	case CPER_SEV_RECOVERABLE:
 328		return GHES_SEV_RECOVERABLE;
 329	case CPER_SEV_FATAL:
 330		return GHES_SEV_PANIC;
 331	default:
 332		/* Unknown, go panic */
 333		return GHES_SEV_PANIC;
 334	}
 335}
 336
 337static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 338				  int from_phys,
 339				  enum fixed_addresses fixmap_idx)
 340{
 341	void __iomem *vaddr;
 342	u64 offset;
 343	u32 trunk;
 344
 345	while (len > 0) {
 346		offset = paddr - (paddr & PAGE_MASK);
 347		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
 348		trunk = PAGE_SIZE - offset;
 349		trunk = min(trunk, len);
 350		if (from_phys)
 351			memcpy_fromio(buffer, vaddr + offset, trunk);
 352		else
 353			memcpy_toio(vaddr + offset, buffer, trunk);
 354		len -= trunk;
 355		paddr += trunk;
 356		buffer += trunk;
 357		ghes_unmap(vaddr, fixmap_idx);
 358	}
 359}
 360
 361/* Check that the top-level record header has an appropriate size. */
 362static int __ghes_check_estatus(struct ghes *ghes,
 363				struct acpi_hest_generic_status *estatus)
 364{
 365	u32 len = cper_estatus_len(estatus);
 366
 367	if (len < sizeof(*estatus)) {
 368		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
 369		return -EIO;
 370	}
 371
 372	if (len > ghes->generic->error_block_length) {
 373		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
 374		return -EIO;
 375	}
 376
 377	if (cper_estatus_check_header(estatus)) {
 378		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
 379		return -EIO;
 380	}
 381
 382	return 0;
 383}
 384
 385/* Read the CPER block, returning its address, and header in estatus. */
 386static int __ghes_peek_estatus(struct ghes *ghes,
 387			       struct acpi_hest_generic_status *estatus,
 388			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
 389{
 390	struct acpi_hest_generic *g = ghes->generic;
 391	int rc;
 392
 393	rc = apei_read(buf_paddr, &g->error_status_address);
 394	if (rc) {
 395		*buf_paddr = 0;
 396		pr_warn_ratelimited(FW_WARN GHES_PFX
 397"Failed to read error status block address for hardware error source: %d.\n",
 398				   g->header.source_id);
 399		return -EIO;
 400	}
 401	if (!*buf_paddr)
 402		return -ENOENT;
 403
 404	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
 405			      fixmap_idx);
 406	if (!estatus->block_status) {
 407		*buf_paddr = 0;
 408		return -ENOENT;
 409	}
 410
 411	return 0;
 412}
 413
 414static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
 415			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
 416			       size_t buf_len)
 417{
 418	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
 419	if (cper_estatus_check(estatus)) {
 420		pr_warn_ratelimited(FW_WARN GHES_PFX
 421				    "Failed to read error status block!\n");
 422		return -EIO;
 423	}
 424
 425	return 0;
 426}
 427
 428static int ghes_read_estatus(struct ghes *ghes,
 429			     struct acpi_hest_generic_status *estatus,
 430			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
 431{
 432	int rc;
 433
 434	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
 435	if (rc)
 436		return rc;
 437
 438	rc = __ghes_check_estatus(ghes, estatus);
 439	if (rc)
 440		return rc;
 441
 442	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
 443				   cper_estatus_len(estatus));
 444}
 445
 446static void ghes_clear_estatus(struct ghes *ghes,
 447			       struct acpi_hest_generic_status *estatus,
 448			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
 449{
 450	estatus->block_status = 0;
 451
 452	if (!buf_paddr)
 453		return;
 454
 455	ghes_copy_tofrom_phys(estatus, buf_paddr,
 456			      sizeof(estatus->block_status), 0,
 457			      fixmap_idx);
 458
 459	/*
 460	 * GHESv2 type HEST entries introduce support for error acknowledgment,
 461	 * so only acknowledge the error if this support is present.
 462	 */
 463	if (is_hest_type_generic_v2(ghes))
 464		ghes_ack_error(ghes->generic_v2);
 465}
 466
 467/*
 468 * Called as task_work before returning to user-space.
 469 * Ensure any queued work has been done before we return to the context that
 470 * triggered the notification.
 471 */
 472static void ghes_kick_task_work(struct callback_head *head)
 473{
 474	struct acpi_hest_generic_status *estatus;
 475	struct ghes_estatus_node *estatus_node;
 476	u32 node_len;
 477
 478	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
 479	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
 480		memory_failure_queue_kick(estatus_node->task_work_cpu);
 481
 482	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 483	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
 484	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
 485}
 486
 487static bool ghes_do_memory_failure(u64 physical_addr, int flags)
 488{
 489	unsigned long pfn;
 490
 491	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
 492		return false;
 493
 494	pfn = PHYS_PFN(physical_addr);
 495	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
 496		pr_warn_ratelimited(FW_WARN GHES_PFX
 497		"Invalid address in generic error data: %#llx\n",
 498		physical_addr);
 499		return false;
 500	}
 501
 502	memory_failure_queue(pfn, flags);
 503	return true;
 504}
 505
 506static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
 507				       int sev, bool sync)
 508{
 509	int flags = -1;
 510	int sec_sev = ghes_severity(gdata->error_severity);
 511	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
 512
 513	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
 514		return false;
 515
 516	/* Only the following two cases can be handled properly for now */
 517	if (sec_sev == GHES_SEV_CORRECTED &&
 518	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
 519		flags = MF_SOFT_OFFLINE;
 520	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
 521		flags = sync ? MF_ACTION_REQUIRED : 0;
 522
 523	if (flags != -1)
 524		return ghes_do_memory_failure(mem_err->physical_addr, flags);
 525
 526	return false;
 527}
 528
 529static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
 530				       int sev, bool sync)
 531{
 532	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
 533	int flags = sync ? MF_ACTION_REQUIRED : 0;
 534	bool queued = false;
 535	int sec_sev, i;
 536	char *p;
 537
 538	log_arm_hw_error(err);
 539
 540	sec_sev = ghes_severity(gdata->error_severity);
 541	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
 542		return false;
 543
 544	p = (char *)(err + 1);
 545	for (i = 0; i < err->err_info_num; i++) {
 546		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
 547		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
 548		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
 549		const char *error_type = "unknown error";
 550
 551		/*
 552		 * The field (err_info->error_info & BIT(26)) is always set to 1
 553		 * in some old firmware of the HiSilicon Kunpeng920. We assume
 554		 * that firmware won't mix corrected errors into an uncorrected
 555		 * section, so we don't filter out 'corrected' errors here.
 556		 */
 557		if (is_cache && has_pa) {
 558			queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
 559			p += err_info->length;
 560			continue;
 561		}
 562
 563		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
 564			error_type = cper_proc_error_type_strs[err_info->type];
 565
 566		pr_warn_ratelimited(FW_WARN GHES_PFX
 567				    "Unhandled processor error type: %s\n",
 568				    error_type);
 569		p += err_info->length;
 570	}
 571
 572	return queued;
 573}
 574
 575/*
 576 * PCIe AER errors need to be sent to the AER driver for reporting and
 577 * recovery. The GHES severities map to the following AER severities and
 578 * require the following handling:
 579 *
 580 * GHES_SEV_CORRECTED -> AER_CORRECTABLE
 581 *     These need to be reported by the AER driver but no recovery is
 582 *     necessary.
 583 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
 584 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
 585 *     These both need to be reported and recovered from by the AER driver.
 586 * GHES_SEV_PANIC does not make it to this handling since the kernel must
 587 *     panic.
 588 */
 589static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
 590{
 591#ifdef CONFIG_ACPI_APEI_PCIEAER
 592	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
 593
 594	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
 595	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
 596		unsigned int devfn;
 597		int aer_severity;
 598		u8 *aer_info;
 599
 600		devfn = PCI_DEVFN(pcie_err->device_id.device,
 601				  pcie_err->device_id.function);
 602		aer_severity = cper_severity_to_aer(gdata->error_severity);
 603
 604		/*
 605		 * If firmware reset the component to contain
 606		 * the error, we must reinitialize it before
 607		 * use, so treat it as a fatal AER error.
 608		 */
 609		if (gdata->flags & CPER_SEC_RESET)
 610			aer_severity = AER_FATAL;
 611
 612		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
 613						  sizeof(struct aer_capability_regs));
 614		if (!aer_info)
 615			return;
 616		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
 617
 618		aer_recover_queue(pcie_err->device_id.segment,
 619				  pcie_err->device_id.bus,
 620				  devfn, aer_severity,
 621				  (struct aer_capability_regs *)
 622				  aer_info);
 623	}
 624#endif
 625}
 626
 627static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
 628
 629int ghes_register_vendor_record_notifier(struct notifier_block *nb)
 630{
 631	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
 632}
 633EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
 634
 635void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
 636{
 637	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
 638}
 639EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
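/*
 * Editor's sketch (illustration only, names hypothetical): a driver
 * interested in non-standard error sections registers on the blocking
 * chain above and receives each copied gdata with its severity:
 */
#if 0
static int example_vendor_cb(struct notifier_block *nb,
			     unsigned long severity, void *data)
{
	struct acpi_hest_generic_data *gdata = data;

	/* Match gdata->section_type against the vendor section GUID. */
	return NOTIFY_OK;
}

static struct notifier_block example_vendor_nb = {
	.notifier_call = example_vendor_cb,
};

/* ghes_register_vendor_record_notifier(&example_vendor_nb); */
#endif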
 640
 641static void ghes_vendor_record_work_func(struct work_struct *work)
 642{
 643	struct ghes_vendor_record_entry *entry;
 644	struct acpi_hest_generic_data *gdata;
 645	u32 len;
 646
 647	entry = container_of(work, struct ghes_vendor_record_entry, work);
 648	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
 649
 650	blocking_notifier_call_chain(&vendor_record_notify_list,
 651				     entry->error_severity, gdata);
 652
 653	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
 654	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
 655}
 656
 657static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
 658					  int sev)
 659{
 660	struct acpi_hest_generic_data *copied_gdata;
 661	struct ghes_vendor_record_entry *entry;
 662	u32 len;
 663
 664	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
 665	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
 666	if (!entry)
 667		return;
 668
 669	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
 670	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
 671	entry->error_severity = sev;
 672
 673	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
 674	schedule_work(&entry->work);
 675}
 676
 677/* Room for 8 entries for each of the 4 event log queues */
 678#define CXL_CPER_FIFO_DEPTH 32
 679DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
 680
 681/* Synchronize schedule_work() with cxl_cper_work changes */
 682static DEFINE_SPINLOCK(cxl_cper_work_lock);
 683struct work_struct *cxl_cper_work;
 684
 685static void cxl_cper_post_event(enum cxl_event_type event_type,
 686				struct cxl_cper_event_rec *rec)
 687{
 688	struct cxl_cper_work_data wd;
 689
 690	if (rec->hdr.length <= sizeof(rec->hdr) ||
 691	    rec->hdr.length > sizeof(*rec)) {
 692		pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
 693		       rec->hdr.length);
 694		return;
 695	}
 696
 697	if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
 698		pr_err(FW_WARN "CXL CPER invalid event\n");
 699		return;
 700	}
 701
 702	guard(spinlock_irqsave)(&cxl_cper_work_lock);
 703
 704	if (!cxl_cper_work)
 705		return;
 706
 707	wd.event_type = event_type;
 708	memcpy(&wd.rec, rec, sizeof(wd.rec));
 709
 710	if (!kfifo_put(&cxl_cper_fifo, wd)) {
 711		pr_err_ratelimited("CXL CPER kfifo overflow\n");
 712		return;
 713	}
 714
 715	schedule_work(cxl_cper_work);
 716}
 717
 718int cxl_cper_register_work(struct work_struct *work)
 719{
 720	if (cxl_cper_work)
 721		return -EINVAL;
 722
 723	guard(spinlock)(&cxl_cper_work_lock);
 724	cxl_cper_work = work;
 725	return 0;
 726}
 727EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL");
 728
 729int cxl_cper_unregister_work(struct work_struct *work)
 730{
 731	if (cxl_cper_work != work)
 732		return -EINVAL;
 733
 734	guard(spinlock)(&cxl_cper_work_lock);
 735	cxl_cper_work = NULL;
 736	return 0;
 737}
 738EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL");
 739
 740int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
 741{
 742	return kfifo_get(&cxl_cper_fifo, wd);
 743}
 744EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
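/*
 * Editor's sketch (illustration only, mirroring how a CXL driver is
 * expected to consume the fifo): register a single work item, then
 * drain queued events from its handler:
 */
#if 0
static void example_cxl_cper_work_fn(struct work_struct *work)
{
	struct cxl_cper_work_data wd;

	while (cxl_cper_kfifo_get(&wd))
		pr_info("CXL CPER event, type %d\n", wd.event_type);
}
static DECLARE_WORK(example_cxl_cper_work, example_cxl_cper_work_fn);

/* cxl_cper_register_work(&example_cxl_cper_work); on probe, */
/* cxl_cper_unregister_work(&example_cxl_cper_work); on removal. */
#endif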
 745
 746static bool ghes_do_proc(struct ghes *ghes,
 747			 const struct acpi_hest_generic_status *estatus)
 748{
 749	int sev, sec_sev;
 750	struct acpi_hest_generic_data *gdata;
 751	guid_t *sec_type;
 752	const guid_t *fru_id = &guid_null;
 753	char *fru_text = "";
 754	bool queued = false;
 755	bool sync = is_hest_sync_notify(ghes);
 756
 757	sev = ghes_severity(estatus->error_severity);
 758	apei_estatus_for_each_section(estatus, gdata) {
 759		sec_type = (guid_t *)gdata->section_type;
 760		sec_sev = ghes_severity(gdata->error_severity);
 761		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
 762			fru_id = (guid_t *)gdata->fru_id;
 763
 764		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
 765			fru_text = gdata->fru_text;
 766
 767		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
 768			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
 769
 770			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
 771
 772			arch_apei_report_mem_error(sev, mem_err);
 773			queued = ghes_handle_memory_failure(gdata, sev, sync);
 774		}
 775		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
 776			ghes_handle_aer(gdata);
 777		}
 778		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
 779			queued = ghes_handle_arm_hw_error(gdata, sev, sync);
 780		} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
 781			struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
 782
 783			cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
 784		} else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
 785			struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
 786
 787			cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
 788		} else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
 789			struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
 790
 791			cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
 792		} else {
 793			void *err = acpi_hest_get_payload(gdata);
 794
 795			ghes_defer_non_standard_event(gdata, sev);
 796			log_non_standard_event(sec_type, fru_id, fru_text,
 797					       sec_sev, err,
 798					       gdata->error_data_length);
 799		}
 800	}
 801
 802	return queued;
 803}
 804
 805static void __ghes_print_estatus(const char *pfx,
 806				 const struct acpi_hest_generic *generic,
 807				 const struct acpi_hest_generic_status *estatus)
 808{
 809	static atomic_t seqno;
 810	unsigned int curr_seqno;
 811	char pfx_seq[64];
 812
 813	if (pfx == NULL) {
 814		if (ghes_severity(estatus->error_severity) <=
 815		    GHES_SEV_CORRECTED)
 816			pfx = KERN_WARNING;
 817		else
 818			pfx = KERN_ERR;
 819	}
 820	curr_seqno = atomic_inc_return(&seqno);
 821	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 822	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
 823	       pfx_seq, generic->header.source_id);
 824	cper_estatus_print(pfx_seq, estatus);
 825}
 826
 827static int ghes_print_estatus(const char *pfx,
 828			      const struct acpi_hest_generic *generic,
 829			      const struct acpi_hest_generic_status *estatus)
 830{
 831	/* Not more than 2 messages every 5 seconds */
 832	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
 833	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
 834	struct ratelimit_state *ratelimit;
 835
 836	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
 837		ratelimit = &ratelimit_corrected;
 838	else
 839		ratelimit = &ratelimit_uncorrected;
 840	if (__ratelimit(ratelimit)) {
 841		__ghes_print_estatus(pfx, generic, estatus);
 842		return 1;
 843	}
 844	return 0;
 845}
 846
 847/*
 848 * GHES error status reporting throttle, used to report more kinds of
 849 * errors instead of just the most frequently occurring ones.
 850 */
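/*
 * Editor's note: the throttle is a small RCU-protected array of
 * recently printed estatus blocks. ghes_estatus_cached() reports a hit
 * when an identical block was already seen within the last
 * GHES_ESTATUS_IN_CACHE_MAX_NSEC (10s); ghes_estatus_cache_add() below
 * evicts the slot with the longest average interval between hits.
 */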
 851static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
 852{
 853	u32 len;
 854	int i, cached = 0;
 855	unsigned long long now;
 856	struct ghes_estatus_cache *cache;
 857	struct acpi_hest_generic_status *cache_estatus;
 858
 859	len = cper_estatus_len(estatus);
 860	rcu_read_lock();
 861	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 862		cache = rcu_dereference(ghes_estatus_caches[i]);
 863		if (cache == NULL)
 864			continue;
 865		if (len != cache->estatus_len)
 866			continue;
 867		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 868		if (memcmp(estatus, cache_estatus, len))
 869			continue;
 870		atomic_inc(&cache->count);
 871		now = sched_clock();
 872		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
 873			cached = 1;
 874		break;
 875	}
 876	rcu_read_unlock();
 877	return cached;
 878}
 879
 880static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
 881	struct acpi_hest_generic *generic,
 882	struct acpi_hest_generic_status *estatus)
 883{
 884	int alloced;
 885	u32 len, cache_len;
 886	struct ghes_estatus_cache *cache;
 887	struct acpi_hest_generic_status *cache_estatus;
 888
 889	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
 890	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
 891		atomic_dec(&ghes_estatus_cache_alloced);
 892		return NULL;
 893	}
 894	len = cper_estatus_len(estatus);
 895	cache_len = GHES_ESTATUS_CACHE_LEN(len);
 896	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
 897	if (!cache) {
 898		atomic_dec(&ghes_estatus_cache_alloced);
 899		return NULL;
 900	}
 901	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 902	memcpy(cache_estatus, estatus, len);
 903	cache->estatus_len = len;
 904	atomic_set(&cache->count, 0);
 905	cache->generic = generic;
 906	cache->time_in = sched_clock();
 907	return cache;
 908}
 909
 910static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
 911{
 912	struct ghes_estatus_cache *cache;
 913	u32 len;
 914
 915	cache = container_of(head, struct ghes_estatus_cache, rcu);
 916	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
 917	len = GHES_ESTATUS_CACHE_LEN(len);
 918	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
 919	atomic_dec(&ghes_estatus_cache_alloced);
 920}
 921
 922static void
 923ghes_estatus_cache_add(struct acpi_hest_generic *generic,
 924		       struct acpi_hest_generic_status *estatus)
 925{
 926	unsigned long long now, duration, period, max_period = 0;
 927	struct ghes_estatus_cache *cache, *new_cache;
 928	struct ghes_estatus_cache __rcu *victim;
 929	int i, slot = -1, count;
 930
 931	new_cache = ghes_estatus_cache_alloc(generic, estatus);
 932	if (!new_cache)
 933		return;
 934
 935	rcu_read_lock();
 936	now = sched_clock();
 937	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 938		cache = rcu_dereference(ghes_estatus_caches[i]);
 939		if (cache == NULL) {
 940			slot = i;
 941			break;
 942		}
 943		duration = now - cache->time_in;
 944		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
 945			slot = i;
 946			break;
 947		}
 948		count = atomic_read(&cache->count);
 949		period = duration;
 950		do_div(period, (count + 1));
 951		if (period > max_period) {
 952			max_period = period;
 953			slot = i;
 954		}
 955	}
 956	rcu_read_unlock();
 957
 958	if (slot != -1) {
 959		/*
 960		 * Use release semantics to ensure that ghes_estatus_cached()
 961		 * running on another CPU will see the updated cache fields if
 962		 * it can see the new value of the pointer.
 963		 */
 964		victim = xchg_release(&ghes_estatus_caches[slot],
 965				      RCU_INITIALIZER(new_cache));
 966
 967		/*
 968		 * At this point, victim may point to a cached item different
 969		 * from the one based on which we selected the slot. Instead of
 970		 * going to the loop again to pick another slot, let's just
 971		 * drop the other item anyway: this may cause a false cache
 972		 * miss later on, but that won't cause any problems.
 973		 */
 974		if (victim)
 975			call_rcu(&unrcu_pointer(victim)->rcu,
 976				 ghes_estatus_cache_rcu_free);
 977	}
 978}
 979
 980static void __ghes_panic(struct ghes *ghes,
 981			 struct acpi_hest_generic_status *estatus,
 982			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
 983{
 984	const char *msg = GHES_PFX "Fatal hardware error";
 985
 986	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
 987
 988	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
 989
 990	if (!panic_timeout)
 991		pr_emerg("%s but panic disabled\n", msg);
 992
 993	panic(msg);
 994}
 995
 996static int ghes_proc(struct ghes *ghes)
 997{
 998	struct acpi_hest_generic_status *estatus = ghes->estatus;
 999	u64 buf_paddr;
1000	int rc;
1001
1002	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
1003	if (rc)
1004		goto out;
1005
1006	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
1007		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
1008
1009	if (!ghes_estatus_cached(estatus)) {
1010		if (ghes_print_estatus(NULL, ghes->generic, estatus))
1011			ghes_estatus_cache_add(ghes->generic, estatus);
1012	}
1013	ghes_do_proc(ghes, estatus);
1014
1015out:
1016	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
1017
1018	return rc;
1019}
1020
1021static void ghes_add_timer(struct ghes *ghes)
1022{
1023	struct acpi_hest_generic *g = ghes->generic;
1024	unsigned long expire;
1025
1026	if (!g->notify.poll_interval) {
1027		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
1028			g->header.source_id);
1029		return;
1030	}
1031	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
1032	ghes->timer.expires = round_jiffies_relative(expire);
1033	add_timer(&ghes->timer);
1034}
1035
1036static void ghes_poll_func(struct timer_list *t)
1037{
1038	struct ghes *ghes = from_timer(ghes, t, timer);
1039	unsigned long flags;
1040
1041	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1042	ghes_proc(ghes);
1043	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1044	if (!(ghes->flags & GHES_EXITING))
1045		ghes_add_timer(ghes);
1046}
1047
1048static irqreturn_t ghes_irq_func(int irq, void *data)
1049{
1050	struct ghes *ghes = data;
1051	unsigned long flags;
1052	int rc;
1053
1054	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1055	rc = ghes_proc(ghes);
1056	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1057	if (rc)
1058		return IRQ_NONE;
1059
1060	return IRQ_HANDLED;
1061}
1062
1063static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
1064			   void *data)
1065{
1066	struct ghes *ghes;
1067	unsigned long flags;
1068	int ret = NOTIFY_DONE;
1069
1070	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1071	rcu_read_lock();
1072	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
1073		if (!ghes_proc(ghes))
1074			ret = NOTIFY_OK;
1075	}
1076	rcu_read_unlock();
1077	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1078
1079	return ret;
1080}
1081
1082static struct notifier_block ghes_notifier_hed = {
1083	.notifier_call = ghes_notify_hed,
1084};
1085
1086/*
1087 * Handlers for CPER records may not be NMI safe. For example,
1088 * memory_failure_queue() takes spinlocks and calls schedule_work_on().
1089 * In any NMI-like handler, memory from ghes_estatus_pool is used to save
1090 * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes
1091 * ghes_proc_in_irq() to run in IRQ context where each estatus in
1092 * ghes_estatus_llist is processed.
1093 *
1094 * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
1095 * to suppress frequent messages.
1096 */
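/*
 * Editor's note: the resulting flow for an NMI-like notification is:
 *
 *   NMI/SEA/SDEI handler
 *     -> ghes_in_nmi_queue_one_entry()   copy estatus into pool memory,
 *                                        llist_add() to ghes_estatus_llist
 *     -> irq_work_queue(&ghes_proc_irq_work)
 *     -> ghes_proc_in_irq()              IRQ context: print, process,
 *                                        and possibly queue task_work
 */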
1097static struct llist_head ghes_estatus_llist;
1098static struct irq_work ghes_proc_irq_work;
1099
1100static void ghes_proc_in_irq(struct irq_work *irq_work)
1101{
1102	struct llist_node *llnode, *next;
1103	struct ghes_estatus_node *estatus_node;
1104	struct acpi_hest_generic *generic;
1105	struct acpi_hest_generic_status *estatus;
1106	bool task_work_pending;
1107	u32 len, node_len;
1108	int ret;
1109
1110	llnode = llist_del_all(&ghes_estatus_llist);
1111	/*
1112	 * The estatus entries in the list are in reverse time order, so
1113	 * restore them to the proper order.
1114	 */
1115	llnode = llist_reverse_order(llnode);
1116	while (llnode) {
1117		next = llnode->next;
1118		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1119					   llnode);
1120		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1121		len = cper_estatus_len(estatus);
1122		node_len = GHES_ESTATUS_NODE_LEN(len);
1123		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
1124		if (!ghes_estatus_cached(estatus)) {
1125			generic = estatus_node->generic;
1126			if (ghes_print_estatus(NULL, generic, estatus))
1127				ghes_estatus_cache_add(generic, estatus);
1128		}
1129
1130		if (task_work_pending && current->mm) {
1131			estatus_node->task_work.func = ghes_kick_task_work;
1132			estatus_node->task_work_cpu = smp_processor_id();
1133			ret = task_work_add(current, &estatus_node->task_work,
1134					    TWA_RESUME);
1135			if (ret)
1136				estatus_node->task_work.func = NULL;
1137		}
1138
1139		if (!estatus_node->task_work.func)
1140			gen_pool_free(ghes_estatus_pool,
1141				      (unsigned long)estatus_node, node_len);
1142
1143		llnode = next;
1144	}
1145}
1146
1147static void ghes_print_queued_estatus(void)
1148{
1149	struct llist_node *llnode;
1150	struct ghes_estatus_node *estatus_node;
1151	struct acpi_hest_generic *generic;
1152	struct acpi_hest_generic_status *estatus;
1153
1154	llnode = llist_del_all(&ghes_estatus_llist);
1155	/*
1156	 * The estatus entries in the list are in reverse time order, so
1157	 * restore them to the proper order.
1158	 */
1159	llnode = llist_reverse_order(llnode);
1160	while (llnode) {
1161		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1162					   llnode);
1163		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1164		generic = estatus_node->generic;
1165		ghes_print_estatus(NULL, generic, estatus);
1166		llnode = llnode->next;
1167	}
1168}
1169
1170static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1171				       enum fixed_addresses fixmap_idx)
1172{
1173	struct acpi_hest_generic_status *estatus, tmp_header;
1174	struct ghes_estatus_node *estatus_node;
1175	u32 len, node_len;
1176	u64 buf_paddr;
1177	int sev, rc;
1178
1179	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1180		return -EOPNOTSUPP;
1181
1182	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1183	if (rc) {
1184		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1185		return rc;
1186	}
1187
1188	rc = __ghes_check_estatus(ghes, &tmp_header);
1189	if (rc) {
1190		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1191		return rc;
1192	}
1193
1194	len = cper_estatus_len(&tmp_header);
1195	node_len = GHES_ESTATUS_NODE_LEN(len);
1196	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1197	if (!estatus_node)
1198		return -ENOMEM;
1199
1200	estatus_node->ghes = ghes;
1201	estatus_node->generic = ghes->generic;
1202	estatus_node->task_work.func = NULL;
1203	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1204
1205	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1206		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1207		rc = -ENOENT;
1208		goto no_work;
1209	}
1210
1211	sev = ghes_severity(estatus->error_severity);
1212	if (sev >= GHES_SEV_PANIC) {
1213		ghes_print_queued_estatus();
1214		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1215	}
1216
1217	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1218
1219	/* This error has been reported before, don't process it again. */
1220	if (ghes_estatus_cached(estatus))
1221		goto no_work;
1222
1223	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1224
1225	return rc;
1226
1227no_work:
1228	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1229		      node_len);
1230
1231	return rc;
1232}
1233
1234static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1235				       enum fixed_addresses fixmap_idx)
1236{
1237	int ret = -ENOENT;
1238	struct ghes *ghes;
1239
1240	rcu_read_lock();
1241	list_for_each_entry_rcu(ghes, rcu_list, list) {
1242		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1243			ret = 0;
1244	}
1245	rcu_read_unlock();
1246
1247	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1248		irq_work_queue(&ghes_proc_irq_work);
1249
1250	return ret;
1251}
1252
1253#ifdef CONFIG_ACPI_APEI_SEA
1254static LIST_HEAD(ghes_sea);
1255
1256/*
1257 * Return 0 only if one of the SEA error sources successfully reported an error
1258 * record sent from the firmware.
1259 */
1260int ghes_notify_sea(void)
1261{
1262	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1263	int rv;
1264
1265	raw_spin_lock(&ghes_notify_lock_sea);
1266	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1267	raw_spin_unlock(&ghes_notify_lock_sea);
1268
1269	return rv;
1270}
1271
1272static void ghes_sea_add(struct ghes *ghes)
1273{
1274	mutex_lock(&ghes_list_mutex);
1275	list_add_rcu(&ghes->list, &ghes_sea);
1276	mutex_unlock(&ghes_list_mutex);
1277}
1278
1279static void ghes_sea_remove(struct ghes *ghes)
1280{
1281	mutex_lock(&ghes_list_mutex);
1282	list_del_rcu(&ghes->list);
1283	mutex_unlock(&ghes_list_mutex);
1284	synchronize_rcu();
1285}
1286#else /* CONFIG_ACPI_APEI_SEA */
1287static inline void ghes_sea_add(struct ghes *ghes) { }
1288static inline void ghes_sea_remove(struct ghes *ghes) { }
1289#endif /* CONFIG_ACPI_APEI_SEA */
1290
1291#ifdef CONFIG_HAVE_ACPI_APEI_NMI
1292/*
1293 * An NMI may be triggered on any CPU, so ghes_in_nmi is used to
1294 * ensure there is only one concurrent reader.
1295 */
1296static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1297
1298static LIST_HEAD(ghes_nmi);
1299
1300static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1301{
1302	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1303	int ret = NMI_DONE;
1304
1305	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1306		return ret;
1307
1308	raw_spin_lock(&ghes_notify_lock_nmi);
1309	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1310		ret = NMI_HANDLED;
1311	raw_spin_unlock(&ghes_notify_lock_nmi);
1312
1313	atomic_dec(&ghes_in_nmi);
1314	return ret;
1315}
1316
1317static void ghes_nmi_add(struct ghes *ghes)
1318{
1319	mutex_lock(&ghes_list_mutex);
1320	if (list_empty(&ghes_nmi))
1321		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1322	list_add_rcu(&ghes->list, &ghes_nmi);
1323	mutex_unlock(&ghes_list_mutex);
1324}
1325
1326static void ghes_nmi_remove(struct ghes *ghes)
1327{
1328	mutex_lock(&ghes_list_mutex);
1329	list_del_rcu(&ghes->list);
1330	if (list_empty(&ghes_nmi))
1331		unregister_nmi_handler(NMI_LOCAL, "ghes");
1332	mutex_unlock(&ghes_list_mutex);
1333	/*
1334	 * To synchronize with the NMI handler, the ghes can only be
1335	 * freed after the NMI handler finishes.
1336	 */
1337	synchronize_rcu();
1338}
1339#else /* CONFIG_HAVE_ACPI_APEI_NMI */
1340static inline void ghes_nmi_add(struct ghes *ghes) { }
1341static inline void ghes_nmi_remove(struct ghes *ghes) { }
1342#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1343
1344static void ghes_nmi_init_cxt(void)
1345{
1346	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1347}
1348
1349static int __ghes_sdei_callback(struct ghes *ghes,
1350				enum fixed_addresses fixmap_idx)
1351{
1352	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1353		irq_work_queue(&ghes_proc_irq_work);
1354
1355		return 0;
1356	}
1357
1358	return -ENOENT;
1359}
1360
1361static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1362				      void *arg)
1363{
1364	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1365	struct ghes *ghes = arg;
1366	int err;
1367
1368	raw_spin_lock(&ghes_notify_lock_sdei_normal);
1369	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1370	raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1371
1372	return err;
1373}
1374
1375static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1376				       void *arg)
1377{
1378	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1379	struct ghes *ghes = arg;
1380	int err;
1381
1382	raw_spin_lock(&ghes_notify_lock_sdei_critical);
1383	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1384	raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1385
1386	return err;
1387}
1388
1389static int apei_sdei_register_ghes(struct ghes *ghes)
1390{
1391	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1392		return -EOPNOTSUPP;
1393
1394	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1395				 ghes_sdei_critical_callback);
1396}
1397
1398static int apei_sdei_unregister_ghes(struct ghes *ghes)
1399{
1400	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1401		return -EOPNOTSUPP;
1402
1403	return sdei_unregister_ghes(ghes);
1404}
1405
1406static int ghes_probe(struct platform_device *ghes_dev)
1407{
1408	struct acpi_hest_generic *generic;
1409	struct ghes *ghes = NULL;
1410	unsigned long flags;
1411
1412	int rc = -EINVAL;
1413
1414	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1415	if (!generic->enabled)
1416		return -ENODEV;
1417
1418	switch (generic->notify.type) {
1419	case ACPI_HEST_NOTIFY_POLLED:
1420	case ACPI_HEST_NOTIFY_EXTERNAL:
1421	case ACPI_HEST_NOTIFY_SCI:
1422	case ACPI_HEST_NOTIFY_GSIV:
1423	case ACPI_HEST_NOTIFY_GPIO:
1424		break;
1425
1426	case ACPI_HEST_NOTIFY_SEA:
1427		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1428			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1429				generic->header.source_id);
1430			rc = -ENOTSUPP;
1431			goto err;
1432		}
1433		break;
1434	case ACPI_HEST_NOTIFY_NMI:
1435		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1436			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1437				generic->header.source_id);
1438			goto err;
1439		}
1440		break;
1441	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1442		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1443			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1444				generic->header.source_id);
1445			goto err;
1446		}
1447		break;
1448	case ACPI_HEST_NOTIFY_LOCAL:
1449		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1450			generic->header.source_id);
1451		goto err;
1452	default:
1453		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1454			generic->notify.type, generic->header.source_id);
1455		goto err;
1456	}
1457
1458	rc = -EIO;
1459	if (generic->error_block_length <
1460	    sizeof(struct acpi_hest_generic_status)) {
1461		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1462			generic->error_block_length, generic->header.source_id);
1463		goto err;
1464	}
1465	ghes = ghes_new(generic);
1466	if (IS_ERR(ghes)) {
1467		rc = PTR_ERR(ghes);
1468		ghes = NULL;
1469		goto err;
1470	}
1471
1472	switch (generic->notify.type) {
1473	case ACPI_HEST_NOTIFY_POLLED:
1474		timer_setup(&ghes->timer, ghes_poll_func, 0);
1475		ghes_add_timer(ghes);
1476		break;
1477	case ACPI_HEST_NOTIFY_EXTERNAL:
1478		/* External interrupt vector is GSI */
1479		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1480		if (rc) {
1481			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1482			       generic->header.source_id);
1483			goto err;
1484		}
1485		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1486				 "GHES IRQ", ghes);
1487		if (rc) {
1488			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1489			       generic->header.source_id);
1490			goto err;
1491		}
1492		break;
1493
1494	case ACPI_HEST_NOTIFY_SCI:
1495	case ACPI_HEST_NOTIFY_GSIV:
1496	case ACPI_HEST_NOTIFY_GPIO:
1497		mutex_lock(&ghes_list_mutex);
1498		if (list_empty(&ghes_hed))
1499			register_acpi_hed_notifier(&ghes_notifier_hed);
1500		list_add_rcu(&ghes->list, &ghes_hed);
1501		mutex_unlock(&ghes_list_mutex);
1502		break;
1503
1504	case ACPI_HEST_NOTIFY_SEA:
1505		ghes_sea_add(ghes);
1506		break;
1507	case ACPI_HEST_NOTIFY_NMI:
1508		ghes_nmi_add(ghes);
1509		break;
1510	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1511		rc = apei_sdei_register_ghes(ghes);
1512		if (rc)
1513			goto err;
1514		break;
1515	default:
1516		BUG();
1517	}
1518
1519	platform_set_drvdata(ghes_dev, ghes);
1520
1521	ghes->dev = &ghes_dev->dev;
1522
1523	mutex_lock(&ghes_devs_mutex);
1524	list_add_tail(&ghes->elist, &ghes_devs);
1525	mutex_unlock(&ghes_devs_mutex);
1526
1527	/* Handle any pending errors right away */
1528	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1529	ghes_proc(ghes);
1530	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1531
1532	return 0;
1533
1534err:
1535	if (ghes) {
1536		ghes_fini(ghes);
1537		kfree(ghes);
1538	}
1539	return rc;
1540}
1541
1542static void ghes_remove(struct platform_device *ghes_dev)
1543{
1544	int rc;
1545	struct ghes *ghes;
1546	struct acpi_hest_generic *generic;
1547
1548	ghes = platform_get_drvdata(ghes_dev);
1549	generic = ghes->generic;
1550
1551	ghes->flags |= GHES_EXITING;
1552	switch (generic->notify.type) {
1553	case ACPI_HEST_NOTIFY_POLLED:
1554		timer_shutdown_sync(&ghes->timer);
1555		break;
1556	case ACPI_HEST_NOTIFY_EXTERNAL:
1557		free_irq(ghes->irq, ghes);
1558		break;
1559
1560	case ACPI_HEST_NOTIFY_SCI:
1561	case ACPI_HEST_NOTIFY_GSIV:
1562	case ACPI_HEST_NOTIFY_GPIO:
1563		mutex_lock(&ghes_list_mutex);
1564		list_del_rcu(&ghes->list);
1565		if (list_empty(&ghes_hed))
1566			unregister_acpi_hed_notifier(&ghes_notifier_hed);
1567		mutex_unlock(&ghes_list_mutex);
1568		synchronize_rcu();
1569		break;
1570
1571	case ACPI_HEST_NOTIFY_SEA:
1572		ghes_sea_remove(ghes);
1573		break;
1574	case ACPI_HEST_NOTIFY_NMI:
1575		ghes_nmi_remove(ghes);
1576		break;
1577	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1578		rc = apei_sdei_unregister_ghes(ghes);
1579		if (rc) {
1580			/*
1581			 * Returning early results in a resource leak, but we're
1582			 * only here if stopping the hardware failed.
1583			 */
1584			dev_err(&ghes_dev->dev, "Failed to unregister ghes (%pe)\n",
1585				ERR_PTR(rc));
1586			return;
1587		}
1588		break;
1589	default:
1590		BUG();
1591		break;
1592	}
1593
1594	ghes_fini(ghes);
1595
1596	mutex_lock(&ghes_devs_mutex);
1597	list_del(&ghes->elist);
1598	mutex_unlock(&ghes_devs_mutex);
1599
1600	kfree(ghes);
1601}
1602
1603static struct platform_driver ghes_platform_driver = {
1604	.driver		= {
1605		.name	= "GHES",
1606	},
1607	.probe		= ghes_probe,
1608	.remove		= ghes_remove,
1609};
1610
1611void __init acpi_ghes_init(void)
1612{
1613	int rc;
1614
1615	sdei_init();
1616
1617	if (acpi_disabled)
1618		return;
1619
1620	switch (hest_disable) {
1621	case HEST_NOT_FOUND:
1622		return;
1623	case HEST_DISABLED:
1624		pr_info(GHES_PFX "HEST is not enabled!\n");
1625		return;
1626	default:
1627		break;
1628	}
1629
1630	if (ghes_disable) {
1631		pr_info(GHES_PFX "GHES is not enabled!\n");
1632		return;
1633	}
1634
1635	ghes_nmi_init_cxt();
1636
1637	rc = platform_driver_register(&ghes_platform_driver);
1638	if (rc)
1639		return;
1640
1641	rc = apei_osc_setup();
1642	if (rc == 0 && osc_sb_apei_support_acked)
1643		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1644	else if (rc == 0 && !osc_sb_apei_support_acked)
1645		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1646	else if (rc && osc_sb_apei_support_acked)
1647		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1648	else
1649		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1650}
1651
1652/*
1653 * Known x86 systems that prefer GHES error reporting:
1654 */
1655static struct acpi_platform_list plat_list[] = {
1656	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
1657	{ } /* End */
1658};
1659
1660struct list_head *ghes_get_devices(void)
1661{
1662	int idx = -1;
1663
1664	if (IS_ENABLED(CONFIG_X86)) {
1665		idx = acpi_match_platform_list(plat_list);
1666		if (idx < 0) {
1667			if (!ghes_edac_force_enable)
1668				return NULL;
1669
1670			pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
1671		}
1672	} else if (list_empty(&ghes_devs)) {
1673		return NULL;
1674	}
1675
1676	return &ghes_devs;
1677}
1678EXPORT_SYMBOL_GPL(ghes_get_devices);
1679
1680void ghes_register_report_chain(struct notifier_block *nb)
1681{
1682	atomic_notifier_chain_register(&ghes_report_chain, nb);
1683}
1684EXPORT_SYMBOL_GPL(ghes_register_report_chain);
1685
1686void ghes_unregister_report_chain(struct notifier_block *nb)
1687{
1688	atomic_notifier_chain_unregister(&ghes_report_chain, nb);
1689}
1690EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * APEI Generic Hardware Error Source support
   4 *
   5 * Generic Hardware Error Source provides a way to report platform
   6 * hardware errors (such as that from chipset). It works in so called
   7 * "Firmware First" mode, that is, hardware errors are reported to
   8 * firmware firstly, then reported to Linux by firmware. This way,
   9 * some non-standard hardware error registers or non-standard hardware
  10 * link can be checked by firmware to produce more hardware error
  11 * information for Linux.
  12 *
  13 * For more information about Generic Hardware Error Source, please
  14 * refer to ACPI Specification version 4.0, section 17.3.2.6
  15 *
  16 * Copyright 2010,2011 Intel Corp.
  17 *   Author: Huang Ying <ying.huang@intel.com>
  18 */
  19
  20#include <linux/arm_sdei.h>
  21#include <linux/kernel.h>
  22#include <linux/moduleparam.h>
  23#include <linux/init.h>
  24#include <linux/acpi.h>
  25#include <linux/io.h>
  26#include <linux/interrupt.h>
  27#include <linux/timer.h>
  28#include <linux/cper.h>
 
  29#include <linux/platform_device.h>
  30#include <linux/mutex.h>
  31#include <linux/ratelimit.h>
  32#include <linux/vmalloc.h>
  33#include <linux/irq_work.h>
  34#include <linux/llist.h>
  35#include <linux/genalloc.h>
 
  36#include <linux/pci.h>
  37#include <linux/pfn.h>
  38#include <linux/aer.h>
  39#include <linux/nmi.h>
  40#include <linux/sched/clock.h>
  41#include <linux/uuid.h>
  42#include <linux/ras.h>
  43#include <linux/task_work.h>
  44
  45#include <acpi/actbl1.h>
  46#include <acpi/ghes.h>
  47#include <acpi/apei.h>
  48#include <asm/fixmap.h>
  49#include <asm/tlbflush.h>
 
  50#include <ras/ras_event.h>
  51
  52#include "apei-internal.h"
  53
  54#define GHES_PFX	"GHES: "
  55
  56#define GHES_ESTATUS_MAX_SIZE		65536
  57#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
  58
  59#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
  60
  61/* This is just an estimation for memory pool allocation */
  62#define GHES_ESTATUS_CACHE_AVG_SIZE	512
  63
  64#define GHES_ESTATUS_CACHES_SIZE	4
  65
  66#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
  67/* Prevent too many caches are allocated because of RCU */
  68#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
  69
  70#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
  71	(sizeof(struct ghes_estatus_cache) + (estatus_len))
  72#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
  73	((struct acpi_hest_generic_status *)				\
  74	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
  75
  76#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
  77	(sizeof(struct ghes_estatus_node) + (estatus_len))
  78#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
  79	((struct acpi_hest_generic_status *)				\
  80	 ((struct ghes_estatus_node *)(estatus_node) + 1))
  81
  82#define GHES_VENDOR_ENTRY_LEN(gdata_len)                               \
  83	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
  84#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)                     \
  85	((struct acpi_hest_generic_data *)                              \
  86	((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
  87
  88/*
  89 *  NMI-like notifications vary by architecture, before the compiler can prune
  90 *  unused static functions it needs a value for these enums.
  91 */
  92#ifndef CONFIG_ARM_SDE_INTERFACE
  93#define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
  94#define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
  95#endif
  96
 
 
  97static inline bool is_hest_type_generic_v2(struct ghes *ghes)
  98{
  99	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
 100}
 101
 102/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 103 * This driver isn't really modular, however for the time being,
 104 * continuing to use module_param is the easiest way to remain
 105 * compatible with existing boot arg use cases.
 106 */
 107bool ghes_disable;
 108module_param_named(disable, ghes_disable, bool, 0);
 109
 110/*
 
 
 
 
 
 
 
 111 * All error sources notified with HED (Hardware Error Device) share a
 112 * single notifier callback, so they need to be linked and checked one
 113 * by one. This holds true for NMI too.
 114 *
 115 * RCU is used for these lists, so ghes_list_mutex is only used for
 116 * list changing, not for traversing.
 117 */
 118static LIST_HEAD(ghes_hed);
 119static DEFINE_MUTEX(ghes_list_mutex);
 120
 121/*
 122 * The memory area used to transfer hardware error information from
 123 * BIOS to Linux can be determined only in an NMI, IRQ or timer
 124 * handler, and general ioremap cannot be used in atomic context, so
 125 * the fixmap is used instead.
 126 *
 127 * This spinlock is used to prevent the fixmap entry from being used
 128 * simultaneously.
 129 */
 130static DEFINE_SPINLOCK(ghes_notify_lock_irq);
 131
 132struct ghes_vendor_record_entry {
 133	struct work_struct work;
 134	int error_severity;
 135	char vendor_record[];
 136};
 137
 138static struct gen_pool *ghes_estatus_pool;
 139static unsigned long ghes_estatus_pool_size_request;
 140
 141static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
 142static atomic_t ghes_estatus_cache_alloced;
 143
 144static int ghes_panic_timeout __read_mostly = 30;
 145
 146static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
 147{
 148	phys_addr_t paddr;
 149	pgprot_t prot;
 150
 151	paddr = PFN_PHYS(pfn);
 152	prot = arch_apei_get_mem_attribute(paddr);
 153	__set_fixmap(fixmap_idx, paddr, prot);
 154
 155	return (void __iomem *) __fix_to_virt(fixmap_idx);
 156}
 157
 158static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
 159{
 160	int _idx = virt_to_fix((unsigned long)vaddr);
 161
 162	WARN_ON_ONCE(fixmap_idx != _idx);
 163	clear_fixmap(fixmap_idx);
 164}
 165
 166int ghes_estatus_pool_init(int num_ghes)
 167{
 168	unsigned long addr, len;
 169	int rc;
 170
 171	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
 172	if (!ghes_estatus_pool)
 173		return -ENOMEM;
 174
 175	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
 176	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
 177
 178	ghes_estatus_pool_size_request = PAGE_ALIGN(len);
 179	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
 180	if (!addr)
 181		goto err_pool_alloc;
 182
 183	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
 184	if (rc)
 185		goto err_pool_add;
 186
 187	return 0;
 188
 189err_pool_add:
 190	vfree((void *)addr);
 191
 192err_pool_alloc:
 193	gen_pool_destroy(ghes_estatus_pool);
 194
 195	return -ENOMEM;
 196}
 197
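/*
 * Worked example (illustrative, assuming 4 KiB pages): with num_ghes = 2,
 * len = 512 * 6 + 2 * 65536 = 134144 bytes, which PAGE_ALIGN() rounds up
 * to 135168 bytes (33 pages) backing the ghes_estatus_pool.
 */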
 198static int map_gen_v2(struct ghes *ghes)
 199{
 200	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
 201}
 202
 203static void unmap_gen_v2(struct ghes *ghes)
 204{
 205	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
 206}
 207
 208static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
 209{
 210	int rc;
 211	u64 val = 0;
 212
 213	rc = apei_read(&val, &gv2->read_ack_register);
 214	if (rc)
 215		return;
 216
 217	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
 218	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;
 219
 220	apei_write(val, &gv2->read_ack_register);
 221}
 222
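/*
 * Worked example (illustrative register values, not from any spec table):
 * with bit_offset = 0, read_ack_preserve = 0x0 and read_ack_write = 0x1,
 * the read-modify-write above discards the old contents and writes 1 to
 * the Read Ack register, telling firmware the block has been consumed.
 */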
 223static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 224{
 225	struct ghes *ghes;
 226	unsigned int error_block_length;
 227	int rc;
 228
 229	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
 230	if (!ghes)
 231		return ERR_PTR(-ENOMEM);
 232
 233	ghes->generic = generic;
 234	if (is_hest_type_generic_v2(ghes)) {
 235		rc = map_gen_v2(ghes);
 236		if (rc)
 237			goto err_free;
 238	}
 239
 240	rc = apei_map_generic_address(&generic->error_status_address);
 241	if (rc)
 242		goto err_unmap_read_ack_addr;
 243	error_block_length = generic->error_block_length;
 244	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
 245		pr_warn(FW_WARN GHES_PFX
 246			"Error status block length is too long: %u for "
 247			"generic hardware error source: %d.\n",
 248			error_block_length, generic->header.source_id);
 249		error_block_length = GHES_ESTATUS_MAX_SIZE;
 250	}
 251	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
 252	if (!ghes->estatus) {
 253		rc = -ENOMEM;
 254		goto err_unmap_status_addr;
 255	}
 256
 257	return ghes;
 258
 259err_unmap_status_addr:
 260	apei_unmap_generic_address(&generic->error_status_address);
 261err_unmap_read_ack_addr:
 262	if (is_hest_type_generic_v2(ghes))
 263		unmap_gen_v2(ghes);
 264err_free:
 265	kfree(ghes);
 266	return ERR_PTR(rc);
 267}
 268
 269static void ghes_fini(struct ghes *ghes)
 270{
 271	kfree(ghes->estatus);
 272	apei_unmap_generic_address(&ghes->generic->error_status_address);
 273	if (is_hest_type_generic_v2(ghes))
 274		unmap_gen_v2(ghes);
 275}
 276
 277static inline int ghes_severity(int severity)
 278{
 279	switch (severity) {
 280	case CPER_SEV_INFORMATIONAL:
 281		return GHES_SEV_NO;
 282	case CPER_SEV_CORRECTED:
 283		return GHES_SEV_CORRECTED;
 284	case CPER_SEV_RECOVERABLE:
 285		return GHES_SEV_RECOVERABLE;
 286	case CPER_SEV_FATAL:
 287		return GHES_SEV_PANIC;
 288	default:
 289		/* Unknown, go panic */
 290		return GHES_SEV_PANIC;
 291	}
 292}
 293
 294static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 295				  int from_phys,
 296				  enum fixed_addresses fixmap_idx)
 297{
 298	void __iomem *vaddr;
 299	u64 offset;
 300	u32 trunk;
 301
 302	while (len > 0) {
 303		offset = paddr - (paddr & PAGE_MASK);
 304		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
 305		trunk = PAGE_SIZE - offset;
 306		trunk = min(trunk, len);
 307		if (from_phys)
 308			memcpy_fromio(buffer, vaddr + offset, trunk);
 309		else
 310			memcpy_toio(vaddr + offset, buffer, trunk);
 311		len -= trunk;
 312		paddr += trunk;
 313		buffer += trunk;
 314		ghes_unmap(vaddr, fixmap_idx);
 315	}
 316}
 317
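/*
 * Worked example (illustrative, assuming 4 KiB pages): copying len = 0x20
 * bytes from paddr = 0x40000ff0 maps two fixmap windows: a first chunk of
 * PAGE_SIZE - 0xff0 = 0x10 bytes, then 0x10 bytes from the next page.
 */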
 318/* Check that the top-level record header has an appropriate size. */
 319static int __ghes_check_estatus(struct ghes *ghes,
 320				struct acpi_hest_generic_status *estatus)
 321{
 322	u32 len = cper_estatus_len(estatus);
 323
 324	if (len < sizeof(*estatus)) {
 325		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
 326		return -EIO;
 327	}
 328
 329	if (len > ghes->generic->error_block_length) {
 330		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
 331		return -EIO;
 332	}
 333
 334	if (cper_estatus_check_header(estatus)) {
 335		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
 336		return -EIO;
 337	}
 338
 339	return 0;
 340}
 341
 342/* Read the CPER block, returning its address, and header in estatus. */
 343static int __ghes_peek_estatus(struct ghes *ghes,
 344			       struct acpi_hest_generic_status *estatus,
 345			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
 346{
 347	struct acpi_hest_generic *g = ghes->generic;
 348	int rc;
 349
 350	rc = apei_read(buf_paddr, &g->error_status_address);
 351	if (rc) {
 352		*buf_paddr = 0;
 353		pr_warn_ratelimited(FW_WARN GHES_PFX
 354"Failed to read error status block address for hardware error source: %d.\n",
 355				   g->header.source_id);
 356		return -EIO;
 357	}
 358	if (!*buf_paddr)
 359		return -ENOENT;
 360
 361	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
 362			      fixmap_idx);
 363	if (!estatus->block_status) {
 364		*buf_paddr = 0;
 365		return -ENOENT;
 366	}
 367
 368	return 0;
 369}
 370
 371static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
 372			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
 373			       size_t buf_len)
 374{
 375	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
 376	if (cper_estatus_check(estatus)) {
 377		pr_warn_ratelimited(FW_WARN GHES_PFX
 378				    "Failed to read error status block!\n");
 379		return -EIO;
 380	}
 381
 382	return 0;
 383}
 384
 385static int ghes_read_estatus(struct ghes *ghes,
 386			     struct acpi_hest_generic_status *estatus,
 387			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
 388{
 389	int rc;
 390
 391	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
 392	if (rc)
 393		return rc;
 394
 395	rc = __ghes_check_estatus(ghes, estatus);
 396	if (rc)
 397		return rc;
 398
 399	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
 400				   cper_estatus_len(estatus));
 401}
 402
 403static void ghes_clear_estatus(struct ghes *ghes,
 404			       struct acpi_hest_generic_status *estatus,
 405			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
 406{
 407	estatus->block_status = 0;
 408
 409	if (!buf_paddr)
 410		return;
 411
 412	ghes_copy_tofrom_phys(estatus, buf_paddr,
 413			      sizeof(estatus->block_status), 0,
 414			      fixmap_idx);
 415
 416	/*
 417	 * GHESv2 type HEST entries introduce support for error acknowledgment,
 418	 * so only acknowledge the error if this support is present.
 419	 */
 420	if (is_hest_type_generic_v2(ghes))
 421		ghes_ack_error(ghes->generic_v2);
 422}
 423
 424/*
 425 * Called as task_work before returning to user-space.
 426 * Ensure any queued work has been done before we return to the context that
 427 * triggered the notification.
 428 */
 429static void ghes_kick_task_work(struct callback_head *head)
 430{
 431	struct acpi_hest_generic_status *estatus;
 432	struct ghes_estatus_node *estatus_node;
 433	u32 node_len;
 434
 435	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
 436	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
 437		memory_failure_queue_kick(estatus_node->task_work_cpu);
 438
 439	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 440	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
 441	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
 442}
 443
 444static bool ghes_do_memory_failure(u64 physical_addr, int flags)
 445{
 446	unsigned long pfn;
 447
 448	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
 449		return false;
 450
 451	pfn = PHYS_PFN(physical_addr);
 452	if (!pfn_valid(pfn)) {
 453		pr_warn_ratelimited(FW_WARN GHES_PFX
 454		"Invalid address in generic error data: %#llx\n",
 455		physical_addr);
 456		return false;
 457	}
 458
 459	memory_failure_queue(pfn, flags);
 460	return true;
 461}
 462
 463static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
 464				       int sev)
 465{
 466	int flags = -1;
 467	int sec_sev = ghes_severity(gdata->error_severity);
 468	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
 469
 470	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
 471		return false;
 472
 473	/* Only the following two cases can be handled properly for now */
 474	if (sec_sev == GHES_SEV_CORRECTED &&
 475	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
 476		flags = MF_SOFT_OFFLINE;
 477	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
 478		flags = 0;
 479
 480	if (flags != -1)
 481		return ghes_do_memory_failure(mem_err->physical_addr, flags);
 482
 483	return false;
 484}
 485
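/*
 * For example: a corrected section with CPER_SEC_ERROR_THRESHOLD_EXCEEDED
 * set requests MF_SOFT_OFFLINE (migrate the page away without poisoning
 * it), while a recoverable error in a recoverable status block uses
 * flags == 0, the default hard-offline path of memory_failure().
 */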
 486static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
 487{
 488	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
 489	bool queued = false;
 490	int sec_sev, i;
 491	char *p;
 492
 493	log_arm_hw_error(err);
 494
 495	sec_sev = ghes_severity(gdata->error_severity);
 496	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
 497		return false;
 498
 499	p = (char *)(err + 1);
 500	for (i = 0; i < err->err_info_num; i++) {
 501		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
 502		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
 503		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
 504		const char *error_type = "unknown error";
 505
 506		/*
 507		 * The field (err_info->error_info & BIT(26)) is always set to 1
 508		 * in some old firmware of HiSilicon Kunpeng920. We assume that
 509		 * firmware won't mix corrected errors into an uncorrected section,
 510		 * so 'corrected' errors are not filtered out here.
 511		 */
 512		if (is_cache && has_pa) {
 513			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
 514			p += err_info->length;
 515			continue;
 516		}
 517
 518		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
 519			error_type = cper_proc_error_type_strs[err_info->type];
 520
 521		pr_warn_ratelimited(FW_WARN GHES_PFX
 522				    "Unhandled processor error type: %s\n",
 523				    error_type);
 524		p += err_info->length;
 525	}
 526
 527	return queued;
 528}
 529
 530/*
 531 * PCIe AER errors need to be sent to the AER driver for reporting and
 532 * recovery. The GHES severities map to the following AER severities and
 533 * require the following handling:
 534 *
 535 * GHES_SEV_CORRECTED -> AER_CORRECTABLE
 536 *     These need to be reported by the AER driver but no recovery is
 537 *     necessary.
 538 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
 539 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
 540 *     These both need to be reported and recovered from by the AER driver.
 541 * GHES_SEV_PANIC does not make it to this handling since the kernel must
 542 *     panic.
 543 */
 544static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
 545{
 546#ifdef CONFIG_ACPI_APEI_PCIEAER
 547	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
 548
 549	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
 550	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
 551		unsigned int devfn;
 552		int aer_severity;
 553
 554		devfn = PCI_DEVFN(pcie_err->device_id.device,
 555				  pcie_err->device_id.function);
 556		aer_severity = cper_severity_to_aer(gdata->error_severity);
 557
 558		/*
 559		 * If firmware reset the component to contain
 560		 * the error, we must reinitialize it before
 561		 * use, so treat it as a fatal AER error.
 562		 */
 563		if (gdata->flags & CPER_SEC_RESET)
 564			aer_severity = AER_FATAL;
 565
 566		aer_recover_queue(pcie_err->device_id.segment,
 567				  pcie_err->device_id.bus,
 568				  devfn, aer_severity,
 569				  (struct aer_capability_regs *)
 570				  pcie_err->aer_info);
 571	}
 572#endif
 573}
 574
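/*
 * Illustrative: PCI_DEVFN() packs device and function as (dev << 3) | fn,
 * so device 3, function 1 yields devfn 0x19; aer_recover_queue() then
 * carries out the rest of the recovery in process context.
 */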
 575static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
 576
 577int ghes_register_vendor_record_notifier(struct notifier_block *nb)
 578{
 579	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
 580}
 581EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
 582
 583void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
 584{
 585	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
 586}
 587EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
 588
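/*
 * Illustrative usage sketch (my_vendor_cb/my_vendor_nb/my_guid are
 * hypothetical names, not part of this file):
 *
 *	static int my_vendor_cb(struct notifier_block *nb,
 *				unsigned long severity, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		if (guid_equal((guid_t *)gdata->section_type, &my_guid))
 *			my_handle_record(gdata, severity);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vendor_nb = {
 *		.notifier_call = my_vendor_cb,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&my_vendor_nb);
 *
 * The chain is blocking, so callbacks run in process context with the
 * record severity as the notifier value and a copy of the gdata as the
 * data pointer (see ghes_vendor_record_work_func() below).
 */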
 589static void ghes_vendor_record_work_func(struct work_struct *work)
 590{
 591	struct ghes_vendor_record_entry *entry;
 592	struct acpi_hest_generic_data *gdata;
 593	u32 len;
 594
 595	entry = container_of(work, struct ghes_vendor_record_entry, work);
 596	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
 597
 598	blocking_notifier_call_chain(&vendor_record_notify_list,
 599				     entry->error_severity, gdata);
 600
 601	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
 602	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
 603}
 604
 605static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
 606					  int sev)
 607{
 608	struct acpi_hest_generic_data *copied_gdata;
 609	struct ghes_vendor_record_entry *entry;
 610	u32 len;
 611
 612	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
 613	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
 614	if (!entry)
 615		return;
 616
 617	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
 618	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
 619	entry->error_severity = sev;
 620
 621	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
 622	schedule_work(&entry->work);
 623}
 624
 625static bool ghes_do_proc(struct ghes *ghes,
 626			 const struct acpi_hest_generic_status *estatus)
 627{
 628	int sev, sec_sev;
 629	struct acpi_hest_generic_data *gdata;
 630	guid_t *sec_type;
 631	const guid_t *fru_id = &guid_null;
 632	char *fru_text = "";
 633	bool queued = false;
 634
 635	sev = ghes_severity(estatus->error_severity);
 636	apei_estatus_for_each_section(estatus, gdata) {
 637		sec_type = (guid_t *)gdata->section_type;
 638		sec_sev = ghes_severity(gdata->error_severity);
 639		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
 640			fru_id = (guid_t *)gdata->fru_id;
 641
 642		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
 643			fru_text = gdata->fru_text;
 644
 645		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
 646			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
 647
 648			ghes_edac_report_mem_error(sev, mem_err);
 649
 650			arch_apei_report_mem_error(sev, mem_err);
 651			queued = ghes_handle_memory_failure(gdata, sev);
 652		} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
 654			ghes_handle_aer(gdata);
 656		} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
 657			queued = ghes_handle_arm_hw_error(gdata, sev);
 658		} else {
 659			void *err = acpi_hest_get_payload(gdata);
 660
 661			ghes_defer_non_standard_event(gdata, sev);
 662			log_non_standard_event(sec_type, fru_id, fru_text,
 663					       sec_sev, err,
 664					       gdata->error_data_length);
 665		}
 666	}
 667
 668	return queued;
 669}
 670
 671static void __ghes_print_estatus(const char *pfx,
 672				 const struct acpi_hest_generic *generic,
 673				 const struct acpi_hest_generic_status *estatus)
 674{
 675	static atomic_t seqno;
 676	unsigned int curr_seqno;
 677	char pfx_seq[64];
 678
 679	if (pfx == NULL) {
 680		if (ghes_severity(estatus->error_severity) <=
 681		    GHES_SEV_CORRECTED)
 682			pfx = KERN_WARNING;
 683		else
 684			pfx = KERN_ERR;
 685	}
 686	curr_seqno = atomic_inc_return(&seqno);
 687	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 688	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
 689	       pfx_seq, generic->header.source_id);
 690	cper_estatus_print(pfx_seq, estatus);
 691}
 692
 693static int ghes_print_estatus(const char *pfx,
 694			      const struct acpi_hest_generic *generic,
 695			      const struct acpi_hest_generic_status *estatus)
 696{
 697	/* Not more than 2 messages every 5 seconds */
 698	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
 699	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
 700	struct ratelimit_state *ratelimit;
 701
 702	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
 703		ratelimit = &ratelimit_corrected;
 704	else
 705		ratelimit = &ratelimit_uncorrected;
 706	if (__ratelimit(ratelimit)) {
 707		__ghes_print_estatus(pfx, generic, estatus);
 708		return 1;
 709	}
 710	return 0;
 711}
 712
 713/*
 714 * GHES error status reporting throttle, used to report more kinds of
 715 * errors instead of just the most frequently occurring ones.
 716 */
 717static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
 718{
 719	u32 len;
 720	int i, cached = 0;
 721	unsigned long long now;
 722	struct ghes_estatus_cache *cache;
 723	struct acpi_hest_generic_status *cache_estatus;
 724
 725	len = cper_estatus_len(estatus);
 726	rcu_read_lock();
 727	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 728		cache = rcu_dereference(ghes_estatus_caches[i]);
 729		if (cache == NULL)
 730			continue;
 731		if (len != cache->estatus_len)
 732			continue;
 733		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 734		if (memcmp(estatus, cache_estatus, len))
 735			continue;
 736		atomic_inc(&cache->count);
 737		now = sched_clock();
 738		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
 739			cached = 1;
 740		break;
 741	}
 742	rcu_read_unlock();
 743	return cached;
 744}
 745
 746static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
 747	struct acpi_hest_generic *generic,
 748	struct acpi_hest_generic_status *estatus)
 749{
 750	int alloced;
 751	u32 len, cache_len;
 752	struct ghes_estatus_cache *cache;
 753	struct acpi_hest_generic_status *cache_estatus;
 754
 755	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
 756	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
 757		atomic_dec(&ghes_estatus_cache_alloced);
 758		return NULL;
 759	}
 760	len = cper_estatus_len(estatus);
 761	cache_len = GHES_ESTATUS_CACHE_LEN(len);
 762	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
 763	if (!cache) {
 764		atomic_dec(&ghes_estatus_cache_alloced);
 765		return NULL;
 766	}
 767	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 768	memcpy(cache_estatus, estatus, len);
 769	cache->estatus_len = len;
 770	atomic_set(&cache->count, 0);
 771	cache->generic = generic;
 772	cache->time_in = sched_clock();
 773	return cache;
 774}
 775
 776static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
 777{
 778	u32 len;
 779
 780	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
 781	len = GHES_ESTATUS_CACHE_LEN(len);
 782	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
 783	atomic_dec(&ghes_estatus_cache_alloced);
 784}
 785
 786static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
 787{
 788	struct ghes_estatus_cache *cache;
 789
 790	cache = container_of(head, struct ghes_estatus_cache, rcu);
 791	ghes_estatus_cache_free(cache);
 792}
 793
 794static void ghes_estatus_cache_add(
 795	struct acpi_hest_generic *generic,
 796	struct acpi_hest_generic_status *estatus)
 797{
 798	int i, slot = -1, count;
 799	unsigned long long now, duration, period, max_period = 0;
 800	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
 801
 802	new_cache = ghes_estatus_cache_alloc(generic, estatus);
 803	if (new_cache == NULL)
 804		return;
 805	rcu_read_lock();
 806	now = sched_clock();
 807	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 808		cache = rcu_dereference(ghes_estatus_caches[i]);
 809		if (cache == NULL) {
 810			slot = i;
 811			slot_cache = NULL;
 812			break;
 813		}
 814		duration = now - cache->time_in;
 815		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
 816			slot = i;
 817			slot_cache = cache;
 818			break;
 819		}
 820		count = atomic_read(&cache->count);
 821		period = duration;
 822		do_div(period, (count + 1));
 823		if (period > max_period) {
 824			max_period = period;
 825			slot = i;
 826			slot_cache = cache;
 827		}
 828	}
 829	/* new_cache must be put into array after its contents are written */
 830	smp_wmb();
 831	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
 832				  slot_cache, new_cache) == slot_cache) {
 833		if (slot_cache)
 834			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
 835	} else
 836		ghes_estatus_cache_free(new_cache);
 837	rcu_read_unlock();
 838}
 839
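/*
 * Worked example (illustrative numbers): a slot aged 8 s with count = 3
 * has an effective reporting period of 8 / (3 + 1) = 2 s; a slot aged
 * 6 s with count = 1 has a period of 3 s, so the latter (largest period,
 * i.e. least frequently re-reported) is the eviction candidate. Slots
 * older than 10 s (GHES_ESTATUS_IN_CACHE_MAX_NSEC) are evicted outright.
 */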
 840static void __ghes_panic(struct ghes *ghes,
 841			 struct acpi_hest_generic_status *estatus,
 842			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
 843{
 844	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
 845
 846	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
 847
 848	/* reboot to log the error! */
 849	if (!panic_timeout)
 850		panic_timeout = ghes_panic_timeout;
 851	panic("Fatal hardware error!");
 852}
 853
 854static int ghes_proc(struct ghes *ghes)
 855{
 856	struct acpi_hest_generic_status *estatus = ghes->estatus;
 857	u64 buf_paddr;
 858	int rc;
 859
 860	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
 861	if (rc)
 862		goto out;
 863
 864	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
 865		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
 866
 867	if (!ghes_estatus_cached(estatus)) {
 868		if (ghes_print_estatus(NULL, ghes->generic, estatus))
 869			ghes_estatus_cache_add(ghes->generic, estatus);
 870	}
 871	ghes_do_proc(ghes, estatus);
 872
 873out:
 874	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
 875
 876	return rc;
 877}
 878
 879static void ghes_add_timer(struct ghes *ghes)
 880{
 881	struct acpi_hest_generic *g = ghes->generic;
 882	unsigned long expire;
 883
 884	if (!g->notify.poll_interval) {
 885		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
 886			g->header.source_id);
 887		return;
 888	}
 889	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
 890	ghes->timer.expires = round_jiffies_relative(expire);
 891	add_timer(&ghes->timer);
 892}
 893
 894static void ghes_poll_func(struct timer_list *t)
 895{
 896	struct ghes *ghes = from_timer(ghes, t, timer);
 897	unsigned long flags;
 898
 899	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 900	ghes_proc(ghes);
 901	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 902	if (!(ghes->flags & GHES_EXITING))
 903		ghes_add_timer(ghes);
 904}
 905
 906static irqreturn_t ghes_irq_func(int irq, void *data)
 907{
 908	struct ghes *ghes = data;
 909	unsigned long flags;
 910	int rc;
 911
 912	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 913	rc = ghes_proc(ghes);
 914	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 915	if (rc)
 916		return IRQ_NONE;
 917
 918	return IRQ_HANDLED;
 919}
 920
 921static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
 922			   void *data)
 923{
 924	struct ghes *ghes;
 925	unsigned long flags;
 926	int ret = NOTIFY_DONE;
 927
 928	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 929	rcu_read_lock();
 930	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
 931		if (!ghes_proc(ghes))
 932			ret = NOTIFY_OK;
 933	}
 934	rcu_read_unlock();
 935	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 936
 937	return ret;
 938}
 939
 940static struct notifier_block ghes_notifier_hed = {
 941	.notifier_call = ghes_notify_hed,
 942};
 943
 944/*
 945 * Handlers for CPER records may not be NMI safe. For example,
 946 * memory_failure_queue() takes spinlocks and calls schedule_work_on().
 947 * In any NMI-like handler, memory from ghes_estatus_pool is used to save
 948 * the estatus, which is added to ghes_estatus_llist. irq_work_queue() causes
 949 * ghes_proc_in_irq() to run in IRQ context where each estatus in
 950 * ghes_estatus_llist is processed.
 951 *
 952 * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
 953 * to suppress frequent messages.
 954 */
 955static struct llist_head ghes_estatus_llist;
 956static struct irq_work ghes_proc_irq_work;
 957
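/*
 * Rough flow of the NMI-safe path (illustrative):
 *
 *	NMI/SEA/SDEI handler
 *	  -> gen_pool_alloc() an estatus node, llist_add() it
 *	  -> irq_work_queue(&ghes_proc_irq_work)
 *	IRQ context: ghes_proc_in_irq()
 *	  -> ghes_do_proc() for each queued estatus
 *	  -> optionally task_work_add() so memory failure handling
 *	     completes before returning to user-space
 */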
 958static void ghes_proc_in_irq(struct irq_work *irq_work)
 959{
 960	struct llist_node *llnode, *next;
 961	struct ghes_estatus_node *estatus_node;
 962	struct acpi_hest_generic *generic;
 963	struct acpi_hest_generic_status *estatus;
 964	bool task_work_pending;
 965	u32 len, node_len;
 966	int ret;
 967
 968	llnode = llist_del_all(&ghes_estatus_llist);
 969	/*
 970	 * The estatus entries were added to the list in reverse
 971	 * time order; restore the proper order.
 972	 */
 973	llnode = llist_reverse_order(llnode);
 974	while (llnode) {
 975		next = llnode->next;
 976		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 977					   llnode);
 978		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 979		len = cper_estatus_len(estatus);
 980		node_len = GHES_ESTATUS_NODE_LEN(len);
 981		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
 982		if (!ghes_estatus_cached(estatus)) {
 983			generic = estatus_node->generic;
 984			if (ghes_print_estatus(NULL, generic, estatus))
 985				ghes_estatus_cache_add(generic, estatus);
 986		}
 987
 988		if (task_work_pending && current->mm != &init_mm) {
 989			estatus_node->task_work.func = ghes_kick_task_work;
 990			estatus_node->task_work_cpu = smp_processor_id();
 991			ret = task_work_add(current, &estatus_node->task_work,
 992					    TWA_RESUME);
 993			if (ret)
 994				estatus_node->task_work.func = NULL;
 995		}
 996
 997		if (!estatus_node->task_work.func)
 998			gen_pool_free(ghes_estatus_pool,
 999				      (unsigned long)estatus_node, node_len);
1000
1001		llnode = next;
1002	}
1003}
1004
1005static void ghes_print_queued_estatus(void)
1006{
1007	struct llist_node *llnode;
1008	struct ghes_estatus_node *estatus_node;
1009	struct acpi_hest_generic *generic;
1010	struct acpi_hest_generic_status *estatus;
1011
1012	llnode = llist_del_all(&ghes_estatus_llist);
1013	/*
1014	 * The estatus entries were added to the list in reverse
1015	 * time order; restore the proper order.
1016	 */
1017	llnode = llist_reverse_order(llnode);
1018	while (llnode) {
1019		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1020					   llnode);
1021		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1022		generic = estatus_node->generic;
1023		ghes_print_estatus(NULL, generic, estatus);
1024		llnode = llnode->next;
1025	}
1026}
1027
1028static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1029				       enum fixed_addresses fixmap_idx)
1030{
1031	struct acpi_hest_generic_status *estatus, tmp_header;
1032	struct ghes_estatus_node *estatus_node;
1033	u32 len, node_len;
1034	u64 buf_paddr;
1035	int sev, rc;
1036
1037	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1038		return -EOPNOTSUPP;
1039
1040	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1041	if (rc) {
1042		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1043		return rc;
1044	}
1045
1046	rc = __ghes_check_estatus(ghes, &tmp_header);
1047	if (rc) {
1048		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1049		return rc;
1050	}
1051
1052	len = cper_estatus_len(&tmp_header);
1053	node_len = GHES_ESTATUS_NODE_LEN(len);
1054	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1055	if (!estatus_node)
1056		return -ENOMEM;
1057
1058	estatus_node->ghes = ghes;
1059	estatus_node->generic = ghes->generic;
1060	estatus_node->task_work.func = NULL;
1061	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1062
1063	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1064		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1065		rc = -ENOENT;
1066		goto no_work;
1067	}
1068
1069	sev = ghes_severity(estatus->error_severity);
1070	if (sev >= GHES_SEV_PANIC) {
1071		ghes_print_queued_estatus();
1072		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1073	}
1074
1075	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1076
1077	/* This error has been reported before, don't process it again. */
1078	if (ghes_estatus_cached(estatus))
1079		goto no_work;
1080
1081	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1082
1083	return rc;
1084
1085no_work:
1086	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1087		      node_len);
1088
1089	return rc;
1090}
1091
1092static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1093				       enum fixed_addresses fixmap_idx)
1094{
1095	int ret = -ENOENT;
1096	struct ghes *ghes;
1097
1098	rcu_read_lock();
1099	list_for_each_entry_rcu(ghes, rcu_list, list) {
1100		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1101			ret = 0;
1102	}
1103	rcu_read_unlock();
1104
1105	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1106		irq_work_queue(&ghes_proc_irq_work);
1107
1108	return ret;
1109}
1110
1111#ifdef CONFIG_ACPI_APEI_SEA
1112static LIST_HEAD(ghes_sea);
1113
1114/*
1115 * Return 0 only if one of the SEA error sources successfully reported an error
1116 * record sent from the firmware.
1117 */
1118int ghes_notify_sea(void)
1119{
1120	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1121	int rv;
1122
1123	raw_spin_lock(&ghes_notify_lock_sea);
1124	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1125	raw_spin_unlock(&ghes_notify_lock_sea);
1126
1127	return rv;
1128}
1129
1130static void ghes_sea_add(struct ghes *ghes)
1131{
1132	mutex_lock(&ghes_list_mutex);
1133	list_add_rcu(&ghes->list, &ghes_sea);
1134	mutex_unlock(&ghes_list_mutex);
1135}
1136
1137static void ghes_sea_remove(struct ghes *ghes)
1138{
1139	mutex_lock(&ghes_list_mutex);
1140	list_del_rcu(&ghes->list);
1141	mutex_unlock(&ghes_list_mutex);
1142	synchronize_rcu();
1143}
1144#else /* CONFIG_ACPI_APEI_SEA */
1145static inline void ghes_sea_add(struct ghes *ghes) { }
1146static inline void ghes_sea_remove(struct ghes *ghes) { }
1147#endif /* CONFIG_ACPI_APEI_SEA */
1148
1149#ifdef CONFIG_HAVE_ACPI_APEI_NMI
1150/*
1151 * An NMI may be triggered on any CPU, so ghes_in_nmi is used to
1152 * ensure that only one reader runs at a time.
1153 */
1154static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1155
1156static LIST_HEAD(ghes_nmi);
1157
1158static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1159{
1160	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1161	int ret = NMI_DONE;
1162
1163	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1164		return ret;
1165
1166	raw_spin_lock(&ghes_notify_lock_nmi);
1167	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1168		ret = NMI_HANDLED;
1169	raw_spin_unlock(&ghes_notify_lock_nmi);
1170
1171	atomic_dec(&ghes_in_nmi);
1172	return ret;
1173}
1174
1175static void ghes_nmi_add(struct ghes *ghes)
1176{
1177	mutex_lock(&ghes_list_mutex);
1178	if (list_empty(&ghes_nmi))
1179		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1180	list_add_rcu(&ghes->list, &ghes_nmi);
1181	mutex_unlock(&ghes_list_mutex);
1182}
1183
1184static void ghes_nmi_remove(struct ghes *ghes)
1185{
1186	mutex_lock(&ghes_list_mutex);
1187	list_del_rcu(&ghes->list);
1188	if (list_empty(&ghes_nmi))
1189		unregister_nmi_handler(NMI_LOCAL, "ghes");
1190	mutex_unlock(&ghes_list_mutex);
1191	/*
1192	 * To synchronize with the NMI handler, the ghes can only be
1193	 * freed after the NMI handler has finished.
1194	 */
1195	synchronize_rcu();
1196}
1197#else /* CONFIG_HAVE_ACPI_APEI_NMI */
1198static inline void ghes_nmi_add(struct ghes *ghes) { }
1199static inline void ghes_nmi_remove(struct ghes *ghes) { }
1200#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1201
1202static void ghes_nmi_init_cxt(void)
1203{
1204	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1205}
1206
1207static int __ghes_sdei_callback(struct ghes *ghes,
1208				enum fixed_addresses fixmap_idx)
1209{
1210	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1211		irq_work_queue(&ghes_proc_irq_work);
1212
1213		return 0;
1214	}
1215
1216	return -ENOENT;
1217}
1218
1219static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1220				      void *arg)
1221{
1222	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1223	struct ghes *ghes = arg;
1224	int err;
1225
1226	raw_spin_lock(&ghes_notify_lock_sdei_normal);
1227	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1228	raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1229
1230	return err;
1231}
1232
1233static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1234				       void *arg)
1235{
1236	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1237	struct ghes *ghes = arg;
1238	int err;
1239
1240	raw_spin_lock(&ghes_notify_lock_sdei_critical);
1241	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1242	raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1243
1244	return err;
1245}
1246
1247static int apei_sdei_register_ghes(struct ghes *ghes)
1248{
1249	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1250		return -EOPNOTSUPP;
1251
1252	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1253				 ghes_sdei_critical_callback);
1254}
1255
1256static int apei_sdei_unregister_ghes(struct ghes *ghes)
1257{
1258	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1259		return -EOPNOTSUPP;
1260
1261	return sdei_unregister_ghes(ghes);
1262}
1263
1264static int ghes_probe(struct platform_device *ghes_dev)
1265{
1266	struct acpi_hest_generic *generic;
1267	struct ghes *ghes = NULL;
1268	unsigned long flags;
1269
1270	int rc = -EINVAL;
1271
1272	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1273	if (!generic->enabled)
1274		return -ENODEV;
1275
1276	switch (generic->notify.type) {
1277	case ACPI_HEST_NOTIFY_POLLED:
1278	case ACPI_HEST_NOTIFY_EXTERNAL:
1279	case ACPI_HEST_NOTIFY_SCI:
1280	case ACPI_HEST_NOTIFY_GSIV:
1281	case ACPI_HEST_NOTIFY_GPIO:
1282		break;
1283
1284	case ACPI_HEST_NOTIFY_SEA:
1285		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1286			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1287				generic->header.source_id);
1288			rc = -ENOTSUPP;
1289			goto err;
1290		}
1291		break;
1292	case ACPI_HEST_NOTIFY_NMI:
1293		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1294			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1295				generic->header.source_id);
1296			goto err;
1297		}
1298		break;
1299	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1300		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1301			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1302				generic->header.source_id);
1303			goto err;
1304		}
1305		break;
1306	case ACPI_HEST_NOTIFY_LOCAL:
1307		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1308			generic->header.source_id);
1309		goto err;
1310	default:
1311		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1312			generic->notify.type, generic->header.source_id);
1313		goto err;
1314	}
1315
1316	rc = -EIO;
1317	if (generic->error_block_length <
1318	    sizeof(struct acpi_hest_generic_status)) {
1319		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1320			generic->error_block_length, generic->header.source_id);
1321		goto err;
1322	}
1323	ghes = ghes_new(generic);
1324	if (IS_ERR(ghes)) {
1325		rc = PTR_ERR(ghes);
1326		ghes = NULL;
1327		goto err;
1328	}
1329
1330	switch (generic->notify.type) {
1331	case ACPI_HEST_NOTIFY_POLLED:
1332		timer_setup(&ghes->timer, ghes_poll_func, 0);
1333		ghes_add_timer(ghes);
1334		break;
1335	case ACPI_HEST_NOTIFY_EXTERNAL:
1336		/* External interrupt vector is GSI */
1337		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1338		if (rc) {
1339			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1340			       generic->header.source_id);
1341			goto err;
1342		}
1343		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1344				 "GHES IRQ", ghes);
1345		if (rc) {
1346			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1347			       generic->header.source_id);
1348			goto err;
1349		}
1350		break;
1351
1352	case ACPI_HEST_NOTIFY_SCI:
1353	case ACPI_HEST_NOTIFY_GSIV:
1354	case ACPI_HEST_NOTIFY_GPIO:
1355		mutex_lock(&ghes_list_mutex);
1356		if (list_empty(&ghes_hed))
1357			register_acpi_hed_notifier(&ghes_notifier_hed);
1358		list_add_rcu(&ghes->list, &ghes_hed);
1359		mutex_unlock(&ghes_list_mutex);
1360		break;
1361
1362	case ACPI_HEST_NOTIFY_SEA:
1363		ghes_sea_add(ghes);
1364		break;
1365	case ACPI_HEST_NOTIFY_NMI:
1366		ghes_nmi_add(ghes);
1367		break;
1368	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1369		rc = apei_sdei_register_ghes(ghes);
1370		if (rc)
1371			goto err;
1372		break;
1373	default:
1374		BUG();
1375	}
1376
1377	platform_set_drvdata(ghes_dev, ghes);
1378
1379	ghes_edac_register(ghes, &ghes_dev->dev);
1380
1381	/* Handle any pending errors right away */
1382	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1383	ghes_proc(ghes);
1384	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1385
1386	return 0;
1387
1388err:
1389	if (ghes) {
1390		ghes_fini(ghes);
1391		kfree(ghes);
1392	}
1393	return rc;
1394}
1395
1396static int ghes_remove(struct platform_device *ghes_dev)
1397{
1398	int rc;
1399	struct ghes *ghes;
1400	struct acpi_hest_generic *generic;
1401
1402	ghes = platform_get_drvdata(ghes_dev);
1403	generic = ghes->generic;
1404
1405	ghes->flags |= GHES_EXITING;
1406	switch (generic->notify.type) {
1407	case ACPI_HEST_NOTIFY_POLLED:
1408		del_timer_sync(&ghes->timer);
1409		break;
1410	case ACPI_HEST_NOTIFY_EXTERNAL:
1411		free_irq(ghes->irq, ghes);
1412		break;
1413
1414	case ACPI_HEST_NOTIFY_SCI:
1415	case ACPI_HEST_NOTIFY_GSIV:
1416	case ACPI_HEST_NOTIFY_GPIO:
1417		mutex_lock(&ghes_list_mutex);
1418		list_del_rcu(&ghes->list);
1419		if (list_empty(&ghes_hed))
1420			unregister_acpi_hed_notifier(&ghes_notifier_hed);
1421		mutex_unlock(&ghes_list_mutex);
1422		synchronize_rcu();
1423		break;
1424
1425	case ACPI_HEST_NOTIFY_SEA:
1426		ghes_sea_remove(ghes);
1427		break;
1428	case ACPI_HEST_NOTIFY_NMI:
1429		ghes_nmi_remove(ghes);
1430		break;
1431	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1432		rc = apei_sdei_unregister_ghes(ghes);
1433		if (rc)
1434			return rc;
1435		break;
1436	default:
1437		BUG();
1438		break;
1439	}
1440
1441	ghes_fini(ghes);
1442
1443	ghes_edac_unregister(ghes);
1444
1445	kfree(ghes);
1446
1447	platform_set_drvdata(ghes_dev, NULL);
1448
1449	return 0;
1450}
1451
1452static struct platform_driver ghes_platform_driver = {
1453	.driver		= {
1454		.name	= "GHES",
1455	},
1456	.probe		= ghes_probe,
1457	.remove		= ghes_remove,
1458};
1459
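/*
 * The APEI HEST parsing code is expected to register one platform device
 * named "GHES" per enabled generic error source (an assumption based on
 * the driver name match above), so ghes_probe() runs once per source with
 * its struct acpi_hest_generic as platform data.
 */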
1460static int __init ghes_init(void)
1461{
1462	int rc;
1463
1464	if (acpi_disabled)
1465		return -ENODEV;
1466
1467	switch (hest_disable) {
1468	case HEST_NOT_FOUND:
1469		return -ENODEV;
1470	case HEST_DISABLED:
1471		pr_info(GHES_PFX "HEST is not enabled!\n");
1472		return -EINVAL;
1473	default:
1474		break;
1475	}
1476
1477	if (ghes_disable) {
1478		pr_info(GHES_PFX "GHES is not enabled!\n");
1479		return -EINVAL;
1480	}
1481
1482	ghes_nmi_init_cxt();
1483
1484	rc = platform_driver_register(&ghes_platform_driver);
1485	if (rc)
1486		goto err;
1487
1488	rc = apei_osc_setup();
1489	if (rc == 0 && osc_sb_apei_support_acked)
1490		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1491	else if (rc == 0 && !osc_sb_apei_support_acked)
1492		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1493	else if (rc && osc_sb_apei_support_acked)
1494		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1495	else
1496		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1497
1498	return 0;
1499err:
1500	return rc;
1501}
1502device_initcall(ghes_init);