v3.5.6
   1/*
   2 * APEI Generic Hardware Error Source support
   3 *
   4 * Generic Hardware Error Source provides a way to report platform
   5 * hardware errors (such as those from the chipset). It works in the
   6 * so-called "Firmware First" mode, that is, hardware errors are
   7 * reported to the firmware first, then reported to Linux by the
   8 * firmware. This way, some non-standard hardware error registers or
   9 * non-standard hardware links can be checked by the firmware to
  10 * produce more hardware error information for Linux.
  11 *
  12 * For more information about Generic Hardware Error Source, please
  13 * refer to ACPI Specification version 4.0, section 17.3.2.6
  14 *
  15 * Copyright 2010,2011 Intel Corp.
  16 *   Author: Huang Ying <ying.huang@intel.com>
  17 *
  18 * This program is free software; you can redistribute it and/or
  19 * modify it under the terms of the GNU General Public License version
  20 * 2 as published by the Free Software Foundation;
  21 *
  22 * This program is distributed in the hope that it will be useful,
  23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  25 * GNU General Public License for more details.
  26 *
  27 * You should have received a copy of the GNU General Public License
  28 * along with this program; if not, write to the Free Software
  29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  30 */
  31
  32#include <linux/kernel.h>
  33#include <linux/module.h>
  34#include <linux/init.h>
  35#include <linux/acpi.h>
  36#include <linux/acpi_io.h>
  37#include <linux/io.h>
  38#include <linux/interrupt.h>
  39#include <linux/timer.h>
  40#include <linux/cper.h>
  41#include <linux/kdebug.h>
  42#include <linux/platform_device.h>
  43#include <linux/mutex.h>
  44#include <linux/ratelimit.h>
  45#include <linux/vmalloc.h>
  46#include <linux/irq_work.h>
  47#include <linux/llist.h>
  48#include <linux/genalloc.h>
  49#include <linux/pci.h>
  50#include <linux/aer.h>
  51#include <acpi/apei.h>
  52#include <acpi/hed.h>
  53#include <asm/mce.h>
  54#include <asm/tlbflush.h>
  55#include <asm/nmi.h>
  56
  57#include "apei-internal.h"
  58
  59#define GHES_PFX	"GHES: "
  60
  61#define GHES_ESTATUS_MAX_SIZE		65536
  62#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
  63
  64#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
  65
  66/* This is just an estimation for memory pool allocation */
  67#define GHES_ESTATUS_CACHE_AVG_SIZE	512
  68
  69#define GHES_ESTATUS_CACHES_SIZE	4
  70
  71#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
  72/* Prevent too many caches from being allocated because of RCU */
  73#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
  74
  75#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
  76	(sizeof(struct ghes_estatus_cache) + (estatus_len))
  77#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
  78	((struct acpi_hest_generic_status *)			\
  79	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
  80
  81#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
  82	(sizeof(struct ghes_estatus_node) + (estatus_len))
  83#define GHES_ESTATUS_FROM_NODE(estatus_node)				\
  84	((struct acpi_hest_generic_status *)				\
  85	 ((struct ghes_estatus_node *)(estatus_node) + 1))
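/*
 * Illustrative note (not part of the original source): the *_LEN() and
 * *_FROM_*() macros above assume that the variable-length error status
 * block is stored immediately after its bookkeeping header, so
 * "(header pointer) + 1" is the start of the estatus payload:
 *
 *    [ struct ghes_estatus_node | struct acpi_hest_generic_status ... ]
 *      ^node                      ^GHES_ESTATUS_FROM_NODE(node)
 *
 * which is also why *_LEN() is sizeof(header) plus the estatus length.
 */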
  86
  87/*
  88 * One struct ghes is created for each generic hardware error source.
  89 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
  90 * handler.
  91 *
  92 * estatus: memory buffer for error status block, allocated during
  93 * HEST parsing.
  94 */
  95#define GHES_TO_CLEAR		0x0001
  96#define GHES_EXITING		0x0002
  97
  98struct ghes {
  99	struct acpi_hest_generic *generic;
 100	struct acpi_hest_generic_status *estatus;
 101	u64 buffer_paddr;
 102	unsigned long flags;
 103	union {
 104		struct list_head list;
 105		struct timer_list timer;
 106		unsigned int irq;
 107	};
 108};
 109
 110struct ghes_estatus_node {
 111	struct llist_node llnode;
 112	struct acpi_hest_generic *generic;
 113};
 114
 115struct ghes_estatus_cache {
 116	u32 estatus_len;
 117	atomic_t count;
 118	struct acpi_hest_generic *generic;
 119	unsigned long long time_in;
 120	struct rcu_head rcu;
 121};
 122
 123bool ghes_disable;
 124module_param_named(disable, ghes_disable, bool, 0);
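/*
 * Usage sketch (assumption, not from the original source): because
 * CONFIG_ACPI_APEI_GHES is a built-in (bool) option, this parameter is
 * normally given on the kernel command line using the usual
 * "<module>.<param>" syntax for built-in code, e.g.:
 *
 *    ghes.disable=1
 */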
 125
 126static int ghes_panic_timeout	__read_mostly = 30;
 127
 128/*
 129 * All error sources notified with SCI share one notifier function,
 130 * so they need to be linked and checked one by one.  The same applies
 131 * to NMI too.
 132 *
 133 * RCU is used for these lists, so ghes_list_mutex is only needed when
 134 * changing the lists, not when traversing them.
 135 */
 136static LIST_HEAD(ghes_sci);
 137static LIST_HEAD(ghes_nmi);
 138static DEFINE_MUTEX(ghes_list_mutex);
 139
 140/*
 141 * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
 142 * mutual exclusion.
 143 */
 144static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
 145
 146/*
 147 * The memory area used to transfer hardware error information from
 148 * BIOS to Linux can be determined only in the NMI, IRQ or timer
 149 * handler, but the generic ioremap cannot be used in atomic context,
 150 * so a special atomic version of ioremap is implemented for that.
 151 */
 152
 153/*
 154 * Two virtual pages are used, one for NMI context, the other for
 155 * IRQ/PROCESS context
 156 */
 157#define GHES_IOREMAP_PAGES		2
 158#define GHES_IOREMAP_NMI_PAGE(base)	(base)
 159#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
 160
 161/* virtual memory area for atomic ioremap */
 162static struct vm_struct *ghes_ioremap_area;
 163/*
 164 * These 2 spinlocks are used to prevent the atomic ioremap virtual
 165 * memory area from being mapped simultaneously.
 166 */
 167static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
 168static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
 169
 170/*
 171 * printk is not safe in NMI context.  So in the NMI handler, we
 172 * allocate the required memory from a lock-less memory allocator
 173 * (ghes_estatus_pool), save the estatus into it, put it onto a lock-less
 174 * list (ghes_estatus_llist), then delay printk into IRQ context via
 175 * irq_work (ghes_proc_irq_work).  ghes_estatus_pool_size_request
 176 * records the pool size required by all NMI error sources.
 177 */
 178static struct gen_pool *ghes_estatus_pool;
 179static unsigned long ghes_estatus_pool_size_request;
 180static struct llist_head ghes_estatus_llist;
 181static struct irq_work ghes_proc_irq_work;
 182
 183struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
 184static atomic_t ghes_estatus_cache_alloced;
 185
 186static int ghes_ioremap_init(void)
 187{
 188	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
 189		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
 190	if (!ghes_ioremap_area) {
 191		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
 192		return -ENOMEM;
 193	}
 194
 195	return 0;
 196}
 197
 198static void ghes_ioremap_exit(void)
 199{
 200	free_vm_area(ghes_ioremap_area);
 201}
 202
 203static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
 204{
 205	unsigned long vaddr;
 206
 207	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
 208	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
 209			   pfn << PAGE_SHIFT, PAGE_KERNEL);
 210
 211	return (void __iomem *)vaddr;
 212}
 213
 214static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
 215{
 216	unsigned long vaddr;
 217
 218	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
 219	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
 220			   pfn << PAGE_SHIFT, PAGE_KERNEL);
 221
 222	return (void __iomem *)vaddr;
 223}
 224
 225static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
 226{
 227	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
 228	void *base = ghes_ioremap_area->addr;
 229
 230	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
 231	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
 232	__flush_tlb_one(vaddr);
 233}
 234
 235static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
 236{
 237	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
 238	void *base = ghes_ioremap_area->addr;
 239
 240	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
 241	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
 242	__flush_tlb_one(vaddr);
 243}
 244
 245static int ghes_estatus_pool_init(void)
 246{
 247	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
 248	if (!ghes_estatus_pool)
 249		return -ENOMEM;
 250	return 0;
 251}
 252
 253static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
 254					      struct gen_pool_chunk *chunk,
 255					      void *data)
 256{
 257	free_page(chunk->start_addr);
 258}
 259
 260static void ghes_estatus_pool_exit(void)
 261{
 262	gen_pool_for_each_chunk(ghes_estatus_pool,
 263				ghes_estatus_pool_free_chunk_page, NULL);
 264	gen_pool_destroy(ghes_estatus_pool);
 265}
 266
 267static int ghes_estatus_pool_expand(unsigned long len)
 268{
 269	unsigned long i, pages, size, addr;
 270	int ret;
 271
 272	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
 273	size = gen_pool_size(ghes_estatus_pool);
 274	if (size >= ghes_estatus_pool_size_request)
 275		return 0;
 276	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
 277	for (i = 0; i < pages; i++) {
 278		addr = __get_free_page(GFP_KERNEL);
 279		if (!addr)
 280			return -ENOMEM;
 281		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
 282		if (ret)
 283			return ret;
 284	}
 285
 286	return 0;
 287}
 288
 289static void ghes_estatus_pool_shrink(unsigned long len)
 290{
 291	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
 292}
 293
 294static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 295{
 296	struct ghes *ghes;
 297	unsigned int error_block_length;
 298	int rc;
 299
 300	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
 301	if (!ghes)
 302		return ERR_PTR(-ENOMEM);
 303	ghes->generic = generic;
 304	rc = apei_map_generic_address(&generic->error_status_address);
 305	if (rc)
 306		goto err_free;
 307	error_block_length = generic->error_block_length;
 308	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
 309		pr_warning(FW_WARN GHES_PFX
 310			   "Error status block length is too long: %u for "
 311			   "generic hardware error source: %d.\n",
 312			   error_block_length, generic->header.source_id);
 313		error_block_length = GHES_ESTATUS_MAX_SIZE;
 314	}
 315	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
 316	if (!ghes->estatus) {
 317		rc = -ENOMEM;
 318		goto err_unmap;
 319	}
 320
 321	return ghes;
 322
 323err_unmap:
 324	apei_unmap_generic_address(&generic->error_status_address);
 325err_free:
 326	kfree(ghes);
 327	return ERR_PTR(rc);
 328}
 329
 330static void ghes_fini(struct ghes *ghes)
 331{
 332	kfree(ghes->estatus);
 333	apei_unmap_generic_address(&ghes->generic->error_status_address);
 334}
 335
 336enum {
 337	GHES_SEV_NO = 0x0,
 338	GHES_SEV_CORRECTED = 0x1,
 339	GHES_SEV_RECOVERABLE = 0x2,
 340	GHES_SEV_PANIC = 0x3,
 341};
 342
 343static inline int ghes_severity(int severity)
 344{
 345	switch (severity) {
 346	case CPER_SEV_INFORMATIONAL:
 347		return GHES_SEV_NO;
 348	case CPER_SEV_CORRECTED:
 349		return GHES_SEV_CORRECTED;
 350	case CPER_SEV_RECOVERABLE:
 351		return GHES_SEV_RECOVERABLE;
 352	case CPER_SEV_FATAL:
 353		return GHES_SEV_PANIC;
 354	default:
 355		/* Unknown, go panic */
 356		return GHES_SEV_PANIC;
 357	}
 358}
 359
 360static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 361				  int from_phys)
 362{
 363	void __iomem *vaddr;
 364	unsigned long flags = 0;
 365	int in_nmi = in_nmi();
 366	u64 offset;
 367	u32 trunk;
 368
 369	while (len > 0) {
 370		offset = paddr - (paddr & PAGE_MASK);
 371		if (in_nmi) {
 372			raw_spin_lock(&ghes_ioremap_lock_nmi);
 373			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
 374		} else {
 375			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
 376			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
 377		}
 378		trunk = PAGE_SIZE - offset;
 379		trunk = min(trunk, len);
 380		if (from_phys)
 381			memcpy_fromio(buffer, vaddr + offset, trunk);
 382		else
 383			memcpy_toio(vaddr + offset, buffer, trunk);
 384		len -= trunk;
 385		paddr += trunk;
 386		buffer += trunk;
 387		if (in_nmi) {
 388			ghes_iounmap_nmi(vaddr);
 389			raw_spin_unlock(&ghes_ioremap_lock_nmi);
 390		} else {
 391			ghes_iounmap_irq(vaddr);
 392			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
 393		}
 394	}
 395}
 396
 397static int ghes_read_estatus(struct ghes *ghes, int silent)
 398{
 399	struct acpi_hest_generic *g = ghes->generic;
 400	u64 buf_paddr;
 401	u32 len;
 402	int rc;
 403
 404	rc = apei_read(&buf_paddr, &g->error_status_address);
 405	if (rc) {
 406		if (!silent && printk_ratelimit())
 407			pr_warning(FW_WARN GHES_PFX
 408"Failed to read error status block address for hardware error source: %d.\n",
 409				   g->header.source_id);
 410		return -EIO;
 411	}
 412	if (!buf_paddr)
 413		return -ENOENT;
 414
 415	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
 416			      sizeof(*ghes->estatus), 1);
 417	if (!ghes->estatus->block_status)
 418		return -ENOENT;
 419
 420	ghes->buffer_paddr = buf_paddr;
 421	ghes->flags |= GHES_TO_CLEAR;
 422
 423	rc = -EIO;
 424	len = apei_estatus_len(ghes->estatus);
 425	if (len < sizeof(*ghes->estatus))
 426		goto err_read_block;
 427	if (len > ghes->generic->error_block_length)
 428		goto err_read_block;
 429	if (apei_estatus_check_header(ghes->estatus))
 430		goto err_read_block;
 431	ghes_copy_tofrom_phys(ghes->estatus + 1,
 432			      buf_paddr + sizeof(*ghes->estatus),
 433			      len - sizeof(*ghes->estatus), 1);
 434	if (apei_estatus_check(ghes->estatus))
 435		goto err_read_block;
 436	rc = 0;
 437
 438err_read_block:
 439	if (rc && !silent && printk_ratelimit())
 440		pr_warning(FW_WARN GHES_PFX
 441			   "Failed to read error status block!\n");
 442	return rc;
 443}
 444
 445static void ghes_clear_estatus(struct ghes *ghes)
 446{
 447	ghes->estatus->block_status = 0;
 448	if (!(ghes->flags & GHES_TO_CLEAR))
 449		return;
 450	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
 451			      sizeof(ghes->estatus->block_status), 0);
 452	ghes->flags &= ~GHES_TO_CLEAR;
 453}
 454
 455static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
 456{
 457	int sev, sec_sev;
 458	struct acpi_hest_generic_data *gdata;
 459
 460	sev = ghes_severity(estatus->error_severity);
 461	apei_estatus_for_each_section(estatus, gdata) {
 462		sec_sev = ghes_severity(gdata->error_severity);
 463		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
 464				 CPER_SEC_PLATFORM_MEM)) {
 465			struct cper_sec_mem_err *mem_err;
 466			mem_err = (struct cper_sec_mem_err *)(gdata+1);
 467#ifdef CONFIG_X86_MCE
 468			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
 469						  mem_err);
 470#endif
 471#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
 472			if (sev == GHES_SEV_RECOVERABLE &&
 473			    sec_sev == GHES_SEV_RECOVERABLE &&
 474			    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
 475				unsigned long pfn;
 476				pfn = mem_err->physical_addr >> PAGE_SHIFT;
 477				memory_failure_queue(pfn, 0, 0);
 478			}
 479#endif
 480		}
 481#ifdef CONFIG_ACPI_APEI_PCIEAER
 482		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
 483				      CPER_SEC_PCIE)) {
 484			struct cper_sec_pcie *pcie_err;
 485			pcie_err = (struct cper_sec_pcie *)(gdata+1);
 486			if (sev == GHES_SEV_RECOVERABLE &&
 487			    sec_sev == GHES_SEV_RECOVERABLE &&
 488			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
 489			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
 490				unsigned int devfn;
 491				int aer_severity;
 492				devfn = PCI_DEVFN(pcie_err->device_id.device,
 493						  pcie_err->device_id.function);
 494				aer_severity = cper_severity_to_aer(sev);
 495				aer_recover_queue(pcie_err->device_id.segment,
 496						  pcie_err->device_id.bus,
 497						  devfn, aer_severity);
 498			}
 499
 500		}
 501#endif
 502	}
 503}
 504
 505static void __ghes_print_estatus(const char *pfx,
 506				 const struct acpi_hest_generic *generic,
 507				 const struct acpi_hest_generic_status *estatus)
 508{
 509	static atomic_t seqno;
 510	unsigned int curr_seqno;
 511	char pfx_seq[64];
 512
 513	if (pfx == NULL) {
 514		if (ghes_severity(estatus->error_severity) <=
 515		    GHES_SEV_CORRECTED)
 516			pfx = KERN_WARNING;
 517		else
 518			pfx = KERN_ERR;
 519	}
 520	curr_seqno = atomic_inc_return(&seqno);
 521	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 522	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
 523	       pfx_seq, generic->header.source_id);
 524	apei_estatus_print(pfx_seq, estatus);
 525}
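/*
 * Example output (illustrative only): with HW_ERR defined as
 * "[Hardware Error]: " in <linux/printk.h>, the first line logged for
 * error source 0 would look roughly like:
 *
 *   {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 0
 *
 * followed by the decoded error status block from apei_estatus_print().
 */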
 526
 527static int ghes_print_estatus(const char *pfx,
 528			      const struct acpi_hest_generic *generic,
 529			      const struct acpi_hest_generic_status *estatus)
 530{
 531	/* Not more than 2 messages every 5 seconds */
 532	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
 533	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
 534	struct ratelimit_state *ratelimit;
 535
 536	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
 537		ratelimit = &ratelimit_corrected;
 538	else
 539		ratelimit = &ratelimit_uncorrected;
 540	if (__ratelimit(ratelimit)) {
 541		__ghes_print_estatus(pfx, generic, estatus);
 542		return 1;
 543	}
 544	return 0;
 545}
 546
 547/*
 548 * GHES error status reporting throttle, to report more kinds of
 549 * errors instead of just the most frequently occurring ones.
 550 */
 551static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
 552{
 553	u32 len;
 554	int i, cached = 0;
 555	unsigned long long now;
 556	struct ghes_estatus_cache *cache;
 557	struct acpi_hest_generic_status *cache_estatus;
 558
 559	len = apei_estatus_len(estatus);
 560	rcu_read_lock();
 561	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 562		cache = rcu_dereference(ghes_estatus_caches[i]);
 563		if (cache == NULL)
 564			continue;
 565		if (len != cache->estatus_len)
 566			continue;
 567		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 568		if (memcmp(estatus, cache_estatus, len))
 569			continue;
 570		atomic_inc(&cache->count);
 571		now = sched_clock();
 572		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
 573			cached = 1;
 574		break;
 575	}
 576	rcu_read_unlock();
 577	return cached;
 578}
 579
 580static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
 581	struct acpi_hest_generic *generic,
 582	struct acpi_hest_generic_status *estatus)
 583{
 584	int alloced;
 585	u32 len, cache_len;
 586	struct ghes_estatus_cache *cache;
 587	struct acpi_hest_generic_status *cache_estatus;
 588
 589	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
 590	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
 591		atomic_dec(&ghes_estatus_cache_alloced);
 592		return NULL;
 593	}
 594	len = apei_estatus_len(estatus);
 595	cache_len = GHES_ESTATUS_CACHE_LEN(len);
 596	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
 597	if (!cache) {
 598		atomic_dec(&ghes_estatus_cache_alloced);
 599		return NULL;
 600	}
 601	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 602	memcpy(cache_estatus, estatus, len);
 603	cache->estatus_len = len;
 604	atomic_set(&cache->count, 0);
 605	cache->generic = generic;
 606	cache->time_in = sched_clock();
 607	return cache;
 608}
 609
 610static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
 611{
 612	u32 len;
 613
 614	len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
 615	len = GHES_ESTATUS_CACHE_LEN(len);
 616	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
 617	atomic_dec(&ghes_estatus_cache_alloced);
 618}
 619
 620static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
 621{
 622	struct ghes_estatus_cache *cache;
 623
 624	cache = container_of(head, struct ghes_estatus_cache, rcu);
 625	ghes_estatus_cache_free(cache);
 626}
 627
 628static void ghes_estatus_cache_add(
 629	struct acpi_hest_generic *generic,
 630	struct acpi_hest_generic_status *estatus)
 631{
 632	int i, slot = -1, count;
 633	unsigned long long now, duration, period, max_period = 0;
 634	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
 635
 636	new_cache = ghes_estatus_cache_alloc(generic, estatus);
 637	if (new_cache == NULL)
 638		return;
 639	rcu_read_lock();
 640	now = sched_clock();
 641	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 642		cache = rcu_dereference(ghes_estatus_caches[i]);
 643		if (cache == NULL) {
 644			slot = i;
 645			slot_cache = NULL;
 646			break;
 647		}
 648		duration = now - cache->time_in;
 649		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
 650			slot = i;
 651			slot_cache = cache;
 652			break;
 653		}
 654		count = atomic_read(&cache->count);
 655		period = duration;
 656		do_div(period, (count + 1));
 657		if (period > max_period) {
 658			max_period = period;
 659			slot = i;
 660			slot_cache = cache;
 661		}
 662	}
 663	/* new_cache must be put into array after its contents are written */
 664	smp_wmb();
 665	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
 666				  slot_cache, new_cache) == slot_cache) {
 667		if (slot_cache)
 668			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
 669	} else
 670		ghes_estatus_cache_free(new_cache);
 671	rcu_read_unlock();
 672}
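/*
 * Note on the eviction policy above (summary, not part of the original
 * source): an empty slot or an entry older than
 * GHES_ESTATUS_IN_CACHE_MAX_NSEC is reused immediately; otherwise the
 * entry with the largest average interval between hits,
 * duration / (count + 1), is replaced, so rarely repeated errors are
 * evicted before frequently repeated ones.
 */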
 673
 674static int ghes_proc(struct ghes *ghes)
 675{
 676	int rc;
 677
 678	rc = ghes_read_estatus(ghes, 0);
 679	if (rc)
 680		goto out;
 681	if (!ghes_estatus_cached(ghes->estatus)) {
 682		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
 683			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
 684	}
 685	ghes_do_proc(ghes->estatus);
 686out:
 687	ghes_clear_estatus(ghes);
 688	return 0;
 689}
 690
 691static void ghes_add_timer(struct ghes *ghes)
 692{
 693	struct acpi_hest_generic *g = ghes->generic;
 694	unsigned long expire;
 695
 696	if (!g->notify.poll_interval) {
 697		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
 698			   g->header.source_id);
 699		return;
 700	}
 701	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
 702	ghes->timer.expires = round_jiffies_relative(expire);
 703	add_timer(&ghes->timer);
 704}
 705
 706static void ghes_poll_func(unsigned long data)
 707{
 708	struct ghes *ghes = (void *)data;
 709
 710	ghes_proc(ghes);
 711	if (!(ghes->flags & GHES_EXITING))
 712		ghes_add_timer(ghes);
 713}
 714
 715static irqreturn_t ghes_irq_func(int irq, void *data)
 716{
 717	struct ghes *ghes = data;
 718	int rc;
 719
 720	rc = ghes_proc(ghes);
 721	if (rc)
 722		return IRQ_NONE;
 723
 724	return IRQ_HANDLED;
 725}
 726
 727static int ghes_notify_sci(struct notifier_block *this,
 728				  unsigned long event, void *data)
 729{
 730	struct ghes *ghes;
 731	int ret = NOTIFY_DONE;
 732
 733	rcu_read_lock();
 734	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
 735		if (!ghes_proc(ghes))
 736			ret = NOTIFY_OK;
 737	}
 738	rcu_read_unlock();
 739
 740	return ret;
 741}
 742
 743static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
 744{
 745	struct llist_node *next, *tail = NULL;
 746
 747	while (llnode) {
 748		next = llnode->next;
 749		llnode->next = tail;
 750		tail = llnode;
 751		llnode = next;
 752	}
 753
 754	return tail;
 755}
 756
 757static void ghes_proc_in_irq(struct irq_work *irq_work)
 758{
 759	struct llist_node *llnode, *next;
 760	struct ghes_estatus_node *estatus_node;
 761	struct acpi_hest_generic *generic;
 762	struct acpi_hest_generic_status *estatus;
 763	u32 len, node_len;
 764
 765	llnode = llist_del_all(&ghes_estatus_llist);
 766	/*
 767	 * Because the time order of estatus in list is reversed,
 768	 * revert it back to proper order.
 769	 */
 770	llnode = llist_nodes_reverse(llnode);
 771	while (llnode) {
 772		next = llnode->next;
 773		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 774					   llnode);
 775		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 776		len = apei_estatus_len(estatus);
 777		node_len = GHES_ESTATUS_NODE_LEN(len);
 778		ghes_do_proc(estatus);
 779		if (!ghes_estatus_cached(estatus)) {
 780			generic = estatus_node->generic;
 781			if (ghes_print_estatus(NULL, generic, estatus))
 782				ghes_estatus_cache_add(generic, estatus);
 783		}
 784		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
 785			      node_len);
 786		llnode = next;
 787	}
 788}
 789
 790static void ghes_print_queued_estatus(void)
 791{
 792	struct llist_node *llnode;
 793	struct ghes_estatus_node *estatus_node;
 794	struct acpi_hest_generic *generic;
 795	struct acpi_hest_generic_status *estatus;
 796	u32 len, node_len;
 797
 798	llnode = llist_del_all(&ghes_estatus_llist);
 799	/*
 800	 * Because the time order of estatus in list is reversed,
 801	 * revert it back to proper order.
 802	 */
 803	llnode = llist_nodes_reverse(llnode);
 804	while (llnode) {
 805		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 806					   llnode);
 807		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 808		len = apei_estatus_len(estatus);
 809		node_len = GHES_ESTATUS_NODE_LEN(len);
 810		generic = estatus_node->generic;
 811		ghes_print_estatus(NULL, generic, estatus);
 812		llnode = llnode->next;
 813	}
 814}
 815
 816static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 817{
 818	struct ghes *ghes, *ghes_global = NULL;
 819	int sev, sev_global = -1;
 820	int ret = NMI_DONE;
 821
 822	raw_spin_lock(&ghes_nmi_lock);
 823	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
 824		if (ghes_read_estatus(ghes, 1)) {
 825			ghes_clear_estatus(ghes);
 826			continue;
 827		}
 828		sev = ghes_severity(ghes->estatus->error_severity);
 829		if (sev > sev_global) {
 830			sev_global = sev;
 831			ghes_global = ghes;
 832		}
 833		ret = NMI_HANDLED;
 834	}
 835
 836	if (ret == NMI_DONE)
 837		goto out;
 838
 839	if (sev_global >= GHES_SEV_PANIC) {
 840		oops_begin();
 841		ghes_print_queued_estatus();
 842		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
 843				     ghes_global->estatus);
 844		/* reboot to log the error! */
 845		if (panic_timeout == 0)
 846			panic_timeout = ghes_panic_timeout;
 847		panic("Fatal hardware error!");
 848	}
 849
 850	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
 851#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 852		u32 len, node_len;
 853		struct ghes_estatus_node *estatus_node;
 854		struct acpi_hest_generic_status *estatus;
 855#endif
 856		if (!(ghes->flags & GHES_TO_CLEAR))
 857			continue;
 858#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 859		if (ghes_estatus_cached(ghes->estatus))
 860			goto next;
 861		/* Save estatus for further processing in IRQ context */
 862		len = apei_estatus_len(ghes->estatus);
 863		node_len = GHES_ESTATUS_NODE_LEN(len);
 864		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
 865						      node_len);
 866		if (estatus_node) {
 867			estatus_node->generic = ghes->generic;
 868			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 869			memcpy(estatus, ghes->estatus, len);
 870			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
 871		}
 872next:
 873#endif
 874		ghes_clear_estatus(ghes);
 875	}
 876#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 877	irq_work_queue(&ghes_proc_irq_work);
 878#endif
 879
 880out:
 881	raw_spin_unlock(&ghes_nmi_lock);
 882	return ret;
 883}
 884
 885static struct notifier_block ghes_notifier_sci = {
 886	.notifier_call = ghes_notify_sci,
 887};
 888
 889static unsigned long ghes_esource_prealloc_size(
 890	const struct acpi_hest_generic *generic)
 891{
 892	unsigned long block_length, prealloc_records, prealloc_size;
 893
 894	block_length = min_t(unsigned long, generic->error_block_length,
 895			     GHES_ESTATUS_MAX_SIZE);
 896	prealloc_records = max_t(unsigned long,
 897				 generic->records_to_preallocate, 1);
 898	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
 899			      GHES_ESOURCE_PREALLOC_MAX_SIZE);
 900
 901	return prealloc_size;
 902}
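/*
 * Worked example (hypothetical values): a source advertising
 * error_block_length = 4096 and records_to_preallocate = 8 requests
 * min(4096, GHES_ESTATUS_MAX_SIZE) * max(8, 1) = 32768 bytes, which is
 * below the GHES_ESOURCE_PREALLOC_MAX_SIZE cap of 65536.
 */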
 903
 904static int __devinit ghes_probe(struct platform_device *ghes_dev)
 905{
 906	struct acpi_hest_generic *generic;
 907	struct ghes *ghes = NULL;
 908	unsigned long len;
 909	int rc = -EINVAL;
 910
 911	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
 912	if (!generic->enabled)
 913		return -ENODEV;
 914
 915	switch (generic->notify.type) {
 916	case ACPI_HEST_NOTIFY_POLLED:
 917	case ACPI_HEST_NOTIFY_EXTERNAL:
 918	case ACPI_HEST_NOTIFY_SCI:
 919	case ACPI_HEST_NOTIFY_NMI:
 920		break;
 921	case ACPI_HEST_NOTIFY_LOCAL:
 922		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
 923			   generic->header.source_id);
 924		goto err;
 925	default:
 926		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
 927			   generic->notify.type, generic->header.source_id);
 928		goto err;
 929	}
 930
 931	rc = -EIO;
 932	if (generic->error_block_length <
 933	    sizeof(struct acpi_hest_generic_status)) {
 934		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
 935			   generic->error_block_length,
 936			   generic->header.source_id);
 937		goto err;
 938	}
 939	ghes = ghes_new(generic);
 940	if (IS_ERR(ghes)) {
 941		rc = PTR_ERR(ghes);
 942		ghes = NULL;
 943		goto err;
 944	}
 945	switch (generic->notify.type) {
 946	case ACPI_HEST_NOTIFY_POLLED:
 947		ghes->timer.function = ghes_poll_func;
 948		ghes->timer.data = (unsigned long)ghes;
 949		init_timer_deferrable(&ghes->timer);
 950		ghes_add_timer(ghes);
 951		break;
 952	case ACPI_HEST_NOTIFY_EXTERNAL:
 953		/* External interrupt vector is GSI */
 954		if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
 955			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
 956			       generic->header.source_id);
 957			goto err;
 958		}
 959		if (request_irq(ghes->irq, ghes_irq_func,
 960				0, "GHES IRQ", ghes)) {
 961			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
 962			       generic->header.source_id);
 963			goto err;
 964		}
 965		break;
 966	case ACPI_HEST_NOTIFY_SCI:
 967		mutex_lock(&ghes_list_mutex);
 968		if (list_empty(&ghes_sci))
 969			register_acpi_hed_notifier(&ghes_notifier_sci);
 970		list_add_rcu(&ghes->list, &ghes_sci);
 971		mutex_unlock(&ghes_list_mutex);
 972		break;
 973	case ACPI_HEST_NOTIFY_NMI:
 974		len = ghes_esource_prealloc_size(generic);
 975		ghes_estatus_pool_expand(len);
 976		mutex_lock(&ghes_list_mutex);
 977		if (list_empty(&ghes_nmi))
 978			register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
 979						"ghes");
 980		list_add_rcu(&ghes->list, &ghes_nmi);
 981		mutex_unlock(&ghes_list_mutex);
 982		break;
 983	default:
 984		BUG();
 985	}
 986	platform_set_drvdata(ghes_dev, ghes);
 987
 988	return 0;
 989err:
 990	if (ghes) {
 991		ghes_fini(ghes);
 992		kfree(ghes);
 993	}
 994	return rc;
 995}
 996
 997static int __devexit ghes_remove(struct platform_device *ghes_dev)
 998{
 999	struct ghes *ghes;
1000	struct acpi_hest_generic *generic;
1001	unsigned long len;
1002
1003	ghes = platform_get_drvdata(ghes_dev);
1004	generic = ghes->generic;
1005
1006	ghes->flags |= GHES_EXITING;
1007	switch (generic->notify.type) {
1008	case ACPI_HEST_NOTIFY_POLLED:
1009		del_timer_sync(&ghes->timer);
1010		break;
1011	case ACPI_HEST_NOTIFY_EXTERNAL:
1012		free_irq(ghes->irq, ghes);
1013		break;
1014	case ACPI_HEST_NOTIFY_SCI:
1015		mutex_lock(&ghes_list_mutex);
1016		list_del_rcu(&ghes->list);
1017		if (list_empty(&ghes_sci))
1018			unregister_acpi_hed_notifier(&ghes_notifier_sci);
1019		mutex_unlock(&ghes_list_mutex);
1020		break;
1021	case ACPI_HEST_NOTIFY_NMI:
1022		mutex_lock(&ghes_list_mutex);
1023		list_del_rcu(&ghes->list);
1024		if (list_empty(&ghes_nmi))
1025			unregister_nmi_handler(NMI_LOCAL, "ghes");
1026		mutex_unlock(&ghes_list_mutex);
1027		/*
1028		 * To synchronize with NMI handler, ghes can only be
1029		 * freed after NMI handler finishes.
1030		 */
1031		synchronize_rcu();
1032		len = ghes_esource_prealloc_size(generic);
1033		ghes_estatus_pool_shrink(len);
1034		break;
1035	default:
1036		BUG();
1037		break;
1038	}
1039
1040	ghes_fini(ghes);
1041	kfree(ghes);
1042
1043	platform_set_drvdata(ghes_dev, NULL);
1044
1045	return 0;
1046}
1047
1048static struct platform_driver ghes_platform_driver = {
1049	.driver		= {
1050		.name	= "GHES",
1051		.owner	= THIS_MODULE,
1052	},
1053	.probe		= ghes_probe,
1054	.remove		= ghes_remove,
1055};
1056
1057static int __init ghes_init(void)
1058{
1059	int rc;
1060
1061	if (acpi_disabled)
1062		return -ENODEV;
1063
1064	if (hest_disable) {
1065		pr_info(GHES_PFX "HEST is not enabled!\n");
1066		return -EINVAL;
1067	}
1068
1069	if (ghes_disable) {
1070		pr_info(GHES_PFX "GHES is not enabled!\n");
1071		return -EINVAL;
1072	}
1073
1074	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1075
1076	rc = ghes_ioremap_init();
1077	if (rc)
1078		goto err;
1079
1080	rc = ghes_estatus_pool_init();
1081	if (rc)
1082		goto err_ioremap_exit;
1083
1084	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
1085				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
1086	if (rc)
1087		goto err_pool_exit;
1088
1089	rc = platform_driver_register(&ghes_platform_driver);
1090	if (rc)
1091		goto err_pool_exit;
1092
1093	rc = apei_osc_setup();
1094	if (rc == 0 && osc_sb_apei_support_acked)
1095		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1096	else if (rc == 0 && !osc_sb_apei_support_acked)
1097		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1098	else if (rc && osc_sb_apei_support_acked)
1099		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1100	else
1101		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1102
1103	return 0;
1104err_pool_exit:
1105	ghes_estatus_pool_exit();
1106err_ioremap_exit:
1107	ghes_ioremap_exit();
1108err:
1109	return rc;
1110}
1111
1112static void __exit ghes_exit(void)
1113{
1114	platform_driver_unregister(&ghes_platform_driver);
1115	ghes_estatus_pool_exit();
1116	ghes_ioremap_exit();
1117}
1118
1119module_init(ghes_init);
1120module_exit(ghes_exit);
1121
1122MODULE_AUTHOR("Huang Ying");
1123MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
1124MODULE_LICENSE("GPL");
1125MODULE_ALIAS("platform:GHES");
v4.6
   1/*
   2 * APEI Generic Hardware Error Source support
   3 *
   4 * Generic Hardware Error Source provides a way to report platform
   5 * hardware errors (such as those from the chipset). It works in the
   6 * so-called "Firmware First" mode, that is, hardware errors are
   7 * reported to the firmware first, then reported to Linux by the
   8 * firmware. This way, some non-standard hardware error registers or
   9 * non-standard hardware links can be checked by the firmware to
  10 * produce more hardware error information for Linux.
  11 *
  12 * For more information about Generic Hardware Error Source, please
  13 * refer to ACPI Specification version 4.0, section 17.3.2.6
  14 *
  15 * Copyright 2010,2011 Intel Corp.
  16 *   Author: Huang Ying <ying.huang@intel.com>
  17 *
  18 * This program is free software; you can redistribute it and/or
  19 * modify it under the terms of the GNU General Public License version
  20 * 2 as published by the Free Software Foundation;
  21 *
  22 * This program is distributed in the hope that it will be useful,
  23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  25 * GNU General Public License for more details.
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/moduleparam.h>
  30#include <linux/init.h>
  31#include <linux/acpi.h>
  32#include <linux/io.h>
  33#include <linux/interrupt.h>
  34#include <linux/timer.h>
  35#include <linux/cper.h>
  36#include <linux/kdebug.h>
  37#include <linux/platform_device.h>
  38#include <linux/mutex.h>
  39#include <linux/ratelimit.h>
  40#include <linux/vmalloc.h>
  41#include <linux/irq_work.h>
  42#include <linux/llist.h>
  43#include <linux/genalloc.h>
  44#include <linux/pci.h>
  45#include <linux/aer.h>
  46#include <linux/nmi.h>
  47
  48#include <acpi/ghes.h>
  49#include <acpi/apei.h>
  50#include <asm/tlbflush.h>
  51
  52#include "apei-internal.h"
  53
  54#define GHES_PFX	"GHES: "
  55
  56#define GHES_ESTATUS_MAX_SIZE		65536
  57#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
  58
  59#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
  60
  61/* This is just an estimation for memory pool allocation */
  62#define GHES_ESTATUS_CACHE_AVG_SIZE	512
  63
  64#define GHES_ESTATUS_CACHES_SIZE	4
  65
  66#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
  67/* Prevent too many caches from being allocated because of RCU */
  68#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
  69
  70#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
  71	(sizeof(struct ghes_estatus_cache) + (estatus_len))
  72#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
  73	((struct acpi_hest_generic_status *)				\
  74	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
  75
  76#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
  77	(sizeof(struct ghes_estatus_node) + (estatus_len))
  78#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
  79	((struct acpi_hest_generic_status *)				\
  80	 ((struct ghes_estatus_node *)(estatus_node) + 1))
  81
  82/*
  83 * This driver isn't really modular, however for the time being,
  84 * continuing to use module_param is the easiest way to remain
  85 * compatible with existing boot arg use cases.
  86 */
  87bool ghes_disable;
  88module_param_named(disable, ghes_disable, bool, 0);
  89
  90/*
  91 * All error sources notified with SCI share one notifier function,
  92 * so they need to be linked and checked one by one.  The same applies
  93 * to NMI too.
  94 *
  95 * RCU is used for these lists, so ghes_list_mutex is only needed when
  96 * changing the lists, not when traversing them.
  97 */
  98static LIST_HEAD(ghes_sci);
  99static DEFINE_MUTEX(ghes_list_mutex);
 100
 101/*
 102 * The memory area used to transfer hardware error information from
 103 * BIOS to Linux can be determined only in the NMI, IRQ or timer
 104 * handler, but the generic ioremap cannot be used in atomic context,
 105 * so a special atomic version of ioremap is implemented for that.
 106 */
 107
 108/*
 109 * Two virtual pages are used, one for IRQ/PROCESS context, the other for
 110 * NMI context (optionally).
 111 */
 112#ifdef CONFIG_HAVE_ACPI_APEI_NMI
 113#define GHES_IOREMAP_PAGES           2
 114#else
 115#define GHES_IOREMAP_PAGES           1
 116#endif
 117#define GHES_IOREMAP_IRQ_PAGE(base)	(base)
 118#define GHES_IOREMAP_NMI_PAGE(base)	((base) + PAGE_SIZE)
 119
 120/* virtual memory area for atomic ioremap */
 121static struct vm_struct *ghes_ioremap_area;
 122/*
 123 * These 2 spinlocks are used to prevent the atomic ioremap virtual
 124 * memory area from being mapped simultaneously.
 125 */
 126static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
 127static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
 128
 129static struct gen_pool *ghes_estatus_pool;
 130static unsigned long ghes_estatus_pool_size_request;
 131
 132static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
 133static atomic_t ghes_estatus_cache_alloced;
 134
 135static int ghes_ioremap_init(void)
 136{
 137	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
 138		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
 139	if (!ghes_ioremap_area) {
 140		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
 141		return -ENOMEM;
 142	}
 143
 144	return 0;
 145}
 146
 147static void ghes_ioremap_exit(void)
 148{
 149	free_vm_area(ghes_ioremap_area);
 150}
 151
 152static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
 153{
 154	unsigned long vaddr;
 155
 156	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
 157	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
 158			   pfn << PAGE_SHIFT, PAGE_KERNEL);
 159
 160	return (void __iomem *)vaddr;
 161}
 162
 163static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
 164{
 165	unsigned long vaddr, paddr;
 166	pgprot_t prot;
 167
 168	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
 169
 170	paddr = pfn << PAGE_SHIFT;
 171	prot = arch_apei_get_mem_attribute(paddr);
 172
 173	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
 174
 175	return (void __iomem *)vaddr;
 176}
 177
 178static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
 179{
 180	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
 181	void *base = ghes_ioremap_area->addr;
 182
 183	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
 184	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
 185	arch_apei_flush_tlb_one(vaddr);
 186}
 187
 188static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
 189{
 190	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
 191	void *base = ghes_ioremap_area->addr;
 192
 193	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
 194	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
 195	arch_apei_flush_tlb_one(vaddr);
 196}
 197
 198static int ghes_estatus_pool_init(void)
 199{
 200	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
 201	if (!ghes_estatus_pool)
 202		return -ENOMEM;
 203	return 0;
 204}
 205
 206static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
 207					      struct gen_pool_chunk *chunk,
 208					      void *data)
 209{
 210	free_page(chunk->start_addr);
 211}
 212
 213static void ghes_estatus_pool_exit(void)
 214{
 215	gen_pool_for_each_chunk(ghes_estatus_pool,
 216				ghes_estatus_pool_free_chunk_page, NULL);
 217	gen_pool_destroy(ghes_estatus_pool);
 218}
 219
 220static int ghes_estatus_pool_expand(unsigned long len)
 221{
 222	unsigned long i, pages, size, addr;
 223	int ret;
 224
 225	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
 226	size = gen_pool_size(ghes_estatus_pool);
 227	if (size >= ghes_estatus_pool_size_request)
 228		return 0;
 229	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
 230	for (i = 0; i < pages; i++) {
 231		addr = __get_free_page(GFP_KERNEL);
 232		if (!addr)
 233			return -ENOMEM;
 234		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
 235		if (ret)
 236			return ret;
 237	}
 238
 239	return 0;
 240}
 241
 242static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 243{
 244	struct ghes *ghes;
 245	unsigned int error_block_length;
 246	int rc;
 247
 248	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
 249	if (!ghes)
 250		return ERR_PTR(-ENOMEM);
 251	ghes->generic = generic;
 252	rc = apei_map_generic_address(&generic->error_status_address);
 253	if (rc)
 254		goto err_free;
 255	error_block_length = generic->error_block_length;
 256	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
 257		pr_warning(FW_WARN GHES_PFX
 258			   "Error status block length is too long: %u for "
 259			   "generic hardware error source: %d.\n",
 260			   error_block_length, generic->header.source_id);
 261		error_block_length = GHES_ESTATUS_MAX_SIZE;
 262	}
 263	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
 264	if (!ghes->estatus) {
 265		rc = -ENOMEM;
 266		goto err_unmap;
 267	}
 268
 269	return ghes;
 270
 271err_unmap:
 272	apei_unmap_generic_address(&generic->error_status_address);
 273err_free:
 274	kfree(ghes);
 275	return ERR_PTR(rc);
 276}
 277
 278static void ghes_fini(struct ghes *ghes)
 279{
 280	kfree(ghes->estatus);
 281	apei_unmap_generic_address(&ghes->generic->error_status_address);
 282}
 283
 284static inline int ghes_severity(int severity)
 285{
 286	switch (severity) {
 287	case CPER_SEV_INFORMATIONAL:
 288		return GHES_SEV_NO;
 289	case CPER_SEV_CORRECTED:
 290		return GHES_SEV_CORRECTED;
 291	case CPER_SEV_RECOVERABLE:
 292		return GHES_SEV_RECOVERABLE;
 293	case CPER_SEV_FATAL:
 294		return GHES_SEV_PANIC;
 295	default:
 296		/* Unknown, go panic */
 297		return GHES_SEV_PANIC;
 298	}
 299}
 300
 301static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 302				  int from_phys)
 303{
 304	void __iomem *vaddr;
 305	unsigned long flags = 0;
 306	int in_nmi = in_nmi();
 307	u64 offset;
 308	u32 trunk;
 309
 310	while (len > 0) {
 311		offset = paddr - (paddr & PAGE_MASK);
 312		if (in_nmi) {
 313			raw_spin_lock(&ghes_ioremap_lock_nmi);
 314			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
 315		} else {
 316			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
 317			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
 318		}
 319		trunk = PAGE_SIZE - offset;
 320		trunk = min(trunk, len);
 321		if (from_phys)
 322			memcpy_fromio(buffer, vaddr + offset, trunk);
 323		else
 324			memcpy_toio(vaddr + offset, buffer, trunk);
 325		len -= trunk;
 326		paddr += trunk;
 327		buffer += trunk;
 328		if (in_nmi) {
 329			ghes_iounmap_nmi(vaddr);
 330			raw_spin_unlock(&ghes_ioremap_lock_nmi);
 331		} else {
 332			ghes_iounmap_irq(vaddr);
 333			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
 334		}
 335	}
 336}
 337
 338static int ghes_read_estatus(struct ghes *ghes, int silent)
 339{
 340	struct acpi_hest_generic *g = ghes->generic;
 341	u64 buf_paddr;
 342	u32 len;
 343	int rc;
 344
 345	rc = apei_read(&buf_paddr, &g->error_status_address);
 346	if (rc) {
 347		if (!silent && printk_ratelimit())
 348			pr_warning(FW_WARN GHES_PFX
 349"Failed to read error status block address for hardware error source: %d.\n",
 350				   g->header.source_id);
 351		return -EIO;
 352	}
 353	if (!buf_paddr)
 354		return -ENOENT;
 355
 356	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
 357			      sizeof(*ghes->estatus), 1);
 358	if (!ghes->estatus->block_status)
 359		return -ENOENT;
 360
 361	ghes->buffer_paddr = buf_paddr;
 362	ghes->flags |= GHES_TO_CLEAR;
 363
 364	rc = -EIO;
 365	len = cper_estatus_len(ghes->estatus);
 366	if (len < sizeof(*ghes->estatus))
 367		goto err_read_block;
 368	if (len > ghes->generic->error_block_length)
 369		goto err_read_block;
 370	if (cper_estatus_check_header(ghes->estatus))
 371		goto err_read_block;
 372	ghes_copy_tofrom_phys(ghes->estatus + 1,
 373			      buf_paddr + sizeof(*ghes->estatus),
 374			      len - sizeof(*ghes->estatus), 1);
 375	if (cper_estatus_check(ghes->estatus))
 376		goto err_read_block;
 377	rc = 0;
 378
 379err_read_block:
 380	if (rc && !silent && printk_ratelimit())
 381		pr_warning(FW_WARN GHES_PFX
 382			   "Failed to read error status block!\n");
 383	return rc;
 384}
 385
 386static void ghes_clear_estatus(struct ghes *ghes)
 387{
 388	ghes->estatus->block_status = 0;
 389	if (!(ghes->flags & GHES_TO_CLEAR))
 390		return;
 391	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
 392			      sizeof(ghes->estatus->block_status), 0);
 393	ghes->flags &= ~GHES_TO_CLEAR;
 394}
 395
 396static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
 397{
 398#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
 399	unsigned long pfn;
 400	int flags = -1;
 401	int sec_sev = ghes_severity(gdata->error_severity);
 402	struct cper_sec_mem_err *mem_err;
 403	mem_err = (struct cper_sec_mem_err *)(gdata + 1);
 404
 405	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
 406		return;
 407
 408	pfn = mem_err->physical_addr >> PAGE_SHIFT;
 409	if (!pfn_valid(pfn)) {
 410		pr_warn_ratelimited(FW_WARN GHES_PFX
 411		"Invalid address in generic error data: %#llx\n",
 412		mem_err->physical_addr);
 413		return;
 414	}
 415
 416	/* iff following two events can be handled properly by now */
 417	if (sec_sev == GHES_SEV_CORRECTED &&
 418	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
 419		flags = MF_SOFT_OFFLINE;
 420	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
 421		flags = 0;
 422
 423	if (flags != -1)
 424		memory_failure_queue(pfn, 0, flags);
 425#endif
 426}
 427
 428static void ghes_do_proc(struct ghes *ghes,
 429			 const struct acpi_hest_generic_status *estatus)
 430{
 431	int sev, sec_sev;
 432	struct acpi_hest_generic_data *gdata;
 433
 434	sev = ghes_severity(estatus->error_severity);
 435	apei_estatus_for_each_section(estatus, gdata) {
 436		sec_sev = ghes_severity(gdata->error_severity);
 437		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
 438				 CPER_SEC_PLATFORM_MEM)) {
 439			struct cper_sec_mem_err *mem_err;
 440			mem_err = (struct cper_sec_mem_err *)(gdata+1);
 441			ghes_edac_report_mem_error(ghes, sev, mem_err);
 442
 443			arch_apei_report_mem_error(sev, mem_err);
 444			ghes_handle_memory_failure(gdata, sev);
 445		}
 446#ifdef CONFIG_ACPI_APEI_PCIEAER
 447		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
 448				      CPER_SEC_PCIE)) {
 449			struct cper_sec_pcie *pcie_err;
 450			pcie_err = (struct cper_sec_pcie *)(gdata+1);
 451			if (sev == GHES_SEV_RECOVERABLE &&
 452			    sec_sev == GHES_SEV_RECOVERABLE &&
 453			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
 454			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
 455				unsigned int devfn;
 456				int aer_severity;
 457
 458				devfn = PCI_DEVFN(pcie_err->device_id.device,
 459						  pcie_err->device_id.function);
 460				aer_severity = cper_severity_to_aer(sev);
 461
 462				/*
 463				 * If firmware reset the component to contain
 464				 * the error, we must reinitialize it before
 465				 * use, so treat it as a fatal AER error.
 466				 */
 467				if (gdata->flags & CPER_SEC_RESET)
 468					aer_severity = AER_FATAL;
 469
 470				aer_recover_queue(pcie_err->device_id.segment,
 471						  pcie_err->device_id.bus,
 472						  devfn, aer_severity,
 473						  (struct aer_capability_regs *)
 474						  pcie_err->aer_info);
 475			}
 476
 477		}
 478#endif
 479	}
 480}
 481
 482static void __ghes_print_estatus(const char *pfx,
 483				 const struct acpi_hest_generic *generic,
 484				 const struct acpi_hest_generic_status *estatus)
 485{
 486	static atomic_t seqno;
 487	unsigned int curr_seqno;
 488	char pfx_seq[64];
 489
 490	if (pfx == NULL) {
 491		if (ghes_severity(estatus->error_severity) <=
 492		    GHES_SEV_CORRECTED)
 493			pfx = KERN_WARNING;
 494		else
 495			pfx = KERN_ERR;
 496	}
 497	curr_seqno = atomic_inc_return(&seqno);
 498	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 499	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
 500	       pfx_seq, generic->header.source_id);
 501	cper_estatus_print(pfx_seq, estatus);
 502}
 503
 504static int ghes_print_estatus(const char *pfx,
 505			      const struct acpi_hest_generic *generic,
 506			      const struct acpi_hest_generic_status *estatus)
 507{
 508	/* Not more than 2 messages every 5 seconds */
 509	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
 510	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
 511	struct ratelimit_state *ratelimit;
 512
 513	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
 514		ratelimit = &ratelimit_corrected;
 515	else
 516		ratelimit = &ratelimit_uncorrected;
 517	if (__ratelimit(ratelimit)) {
 518		__ghes_print_estatus(pfx, generic, estatus);
 519		return 1;
 520	}
 521	return 0;
 522}
 523
 524/*
 525 * GHES error status reporting throttle, to report more kinds of
 526 * errors instead of just the most frequently occurring ones.
 527 */
 528static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
 529{
 530	u32 len;
 531	int i, cached = 0;
 532	unsigned long long now;
 533	struct ghes_estatus_cache *cache;
 534	struct acpi_hest_generic_status *cache_estatus;
 535
 536	len = cper_estatus_len(estatus);
 537	rcu_read_lock();
 538	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 539		cache = rcu_dereference(ghes_estatus_caches[i]);
 540		if (cache == NULL)
 541			continue;
 542		if (len != cache->estatus_len)
 543			continue;
 544		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 545		if (memcmp(estatus, cache_estatus, len))
 546			continue;
 547		atomic_inc(&cache->count);
 548		now = sched_clock();
 549		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
 550			cached = 1;
 551		break;
 552	}
 553	rcu_read_unlock();
 554	return cached;
 555}
 556
 557static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
 558	struct acpi_hest_generic *generic,
 559	struct acpi_hest_generic_status *estatus)
 560{
 561	int alloced;
 562	u32 len, cache_len;
 563	struct ghes_estatus_cache *cache;
 564	struct acpi_hest_generic_status *cache_estatus;
 565
 566	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
 567	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
 568		atomic_dec(&ghes_estatus_cache_alloced);
 569		return NULL;
 570	}
 571	len = cper_estatus_len(estatus);
 572	cache_len = GHES_ESTATUS_CACHE_LEN(len);
 573	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
 574	if (!cache) {
 575		atomic_dec(&ghes_estatus_cache_alloced);
 576		return NULL;
 577	}
 578	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
 579	memcpy(cache_estatus, estatus, len);
 580	cache->estatus_len = len;
 581	atomic_set(&cache->count, 0);
 582	cache->generic = generic;
 583	cache->time_in = sched_clock();
 584	return cache;
 585}
 586
 587static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
 588{
 589	u32 len;
 590
 591	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
 592	len = GHES_ESTATUS_CACHE_LEN(len);
 593	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
 594	atomic_dec(&ghes_estatus_cache_alloced);
 595}
 596
 597static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
 598{
 599	struct ghes_estatus_cache *cache;
 600
 601	cache = container_of(head, struct ghes_estatus_cache, rcu);
 602	ghes_estatus_cache_free(cache);
 603}
 604
 605static void ghes_estatus_cache_add(
 606	struct acpi_hest_generic *generic,
 607	struct acpi_hest_generic_status *estatus)
 608{
 609	int i, slot = -1, count;
 610	unsigned long long now, duration, period, max_period = 0;
 611	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
 612
 613	new_cache = ghes_estatus_cache_alloc(generic, estatus);
 614	if (new_cache == NULL)
 615		return;
 616	rcu_read_lock();
 617	now = sched_clock();
 618	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 619		cache = rcu_dereference(ghes_estatus_caches[i]);
 620		if (cache == NULL) {
 621			slot = i;
 622			slot_cache = NULL;
 623			break;
 624		}
 625		duration = now - cache->time_in;
 626		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
 627			slot = i;
 628			slot_cache = cache;
 629			break;
 630		}
 631		count = atomic_read(&cache->count);
 632		period = duration;
 633		do_div(period, (count + 1));
 634		if (period > max_period) {
 635			max_period = period;
 636			slot = i;
 637			slot_cache = cache;
 638		}
 639	}
 640	/* new_cache must be put into array after its contents are written */
 641	smp_wmb();
 642	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
 643				  slot_cache, new_cache) == slot_cache) {
 644		if (slot_cache)
 645			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
 646	} else
 647		ghes_estatus_cache_free(new_cache);
 648	rcu_read_unlock();
 649}
 650
 651static int ghes_proc(struct ghes *ghes)
 652{
 653	int rc;
 654
 655	rc = ghes_read_estatus(ghes, 0);
 656	if (rc)
 657		goto out;
 658	if (!ghes_estatus_cached(ghes->estatus)) {
 659		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
 660			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
 661	}
 662	ghes_do_proc(ghes, ghes->estatus);
 663out:
 664	ghes_clear_estatus(ghes);
 665	return 0;
 666}
 667
 668static void ghes_add_timer(struct ghes *ghes)
 669{
 670	struct acpi_hest_generic *g = ghes->generic;
 671	unsigned long expire;
 672
 673	if (!g->notify.poll_interval) {
 674		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
 675			   g->header.source_id);
 676		return;
 677	}
 678	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
 679	ghes->timer.expires = round_jiffies_relative(expire);
 680	add_timer(&ghes->timer);
 681}
 682
 683static void ghes_poll_func(unsigned long data)
 684{
 685	struct ghes *ghes = (void *)data;
 686
 687	ghes_proc(ghes);
 688	if (!(ghes->flags & GHES_EXITING))
 689		ghes_add_timer(ghes);
 690}
 691
 692static irqreturn_t ghes_irq_func(int irq, void *data)
 693{
 694	struct ghes *ghes = data;
 695	int rc;
 696
 697	rc = ghes_proc(ghes);
 698	if (rc)
 699		return IRQ_NONE;
 700
 701	return IRQ_HANDLED;
 702}
 703
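    /*
     * SCI notifications arrive through the ACPI Hardware Error Device
     * (HED) notifier chain; walk every SCI-notified error source under
     * RCU and process the ones that yield a valid estatus.
     */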
 704static int ghes_notify_sci(struct notifier_block *this,
 705				  unsigned long event, void *data)
 706{
 707	struct ghes *ghes;
 708	int ret = NOTIFY_DONE;
 709
 710	rcu_read_lock();
 711	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
 712		if (!ghes_proc(ghes))
 713			ret = NOTIFY_OK;
 714	}
 715	rcu_read_unlock();
 716
 717	return ret;
 718}
 719
 720static struct notifier_block ghes_notifier_sci = {
 721	.notifier_call = ghes_notify_sci,
 722};
 723
 724#ifdef CONFIG_HAVE_ACPI_APEI_NMI
 725/*
 726 * printk() is not safe in NMI context.  So in the NMI handler we
 727 * allocate the required memory from a lock-less memory allocator
 728 * (ghes_estatus_pool), save the estatus into it, put it on a lock-less
 729 * list (ghes_estatus_llist), and then defer the printk to IRQ context
 730 * via irq_work (ghes_proc_irq_work).  ghes_estatus_pool_size_request
 731 * records the pool size required by all NMI error sources.
 732 */
 733static struct llist_head ghes_estatus_llist;
 734static struct irq_work ghes_proc_irq_work;
 735
 736/*
 737 * An NMI may be triggered on any CPU, so ghes_in_nmi is used to
 738 * allow only one concurrent reader at a time.
 739 */
 740static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
 741
 742static LIST_HEAD(ghes_nmi);
 743
 744static int ghes_panic_timeout	__read_mostly = 30;
 745
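    /*
     * irq_work callback: drain the lock-less list filled by the NMI
     * handler, process and (if not seen recently) log each queued
     * estatus, then return its node to the memory pool.
     */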
 746static void ghes_proc_in_irq(struct irq_work *irq_work)
 747{
 748	struct llist_node *llnode, *next;
 749	struct ghes_estatus_node *estatus_node;
 750	struct acpi_hest_generic *generic;
 751	struct acpi_hest_generic_status *estatus;
 752	u32 len, node_len;
 753
 754	llnode = llist_del_all(&ghes_estatus_llist);
 755	/*
 756	 * The estatus entries were added to the list in reverse time
 757	 * order; restore the proper order here.
 758	 */
 759	llnode = llist_reverse_order(llnode);
 760	while (llnode) {
 761		next = llnode->next;
 762		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 763					   llnode);
 764		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 765		len = cper_estatus_len(estatus);
 766		node_len = GHES_ESTATUS_NODE_LEN(len);
 767		ghes_do_proc(estatus_node->ghes, estatus);
 768		if (!ghes_estatus_cached(estatus)) {
 769			generic = estatus_node->generic;
 770			if (ghes_print_estatus(NULL, generic, estatus))
 771				ghes_estatus_cache_add(generic, estatus);
 772		}
 773		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
 774			      node_len);
 775		llnode = next;
 776	}
 777}
 778
 779static void ghes_print_queued_estatus(void)
 780{
 781	struct llist_node *llnode;
 782	struct ghes_estatus_node *estatus_node;
 783	struct acpi_hest_generic *generic;
 784	struct acpi_hest_generic_status *estatus;
 785	u32 len, node_len;
 786
 787	llnode = llist_del_all(&ghes_estatus_llist);
 788	/*
 789	 * The estatus entries were added to the list in reverse time
 790	 * order; restore the proper order here.
 791	 */
 792	llnode = llist_reverse_order(llnode);
 793	while (llnode) {
 794		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 795					   llnode);
 796		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 797		len = cper_estatus_len(estatus);
 798		node_len = GHES_ESTATUS_NODE_LEN(len);
 799		generic = estatus_node->generic;
 800		ghes_print_estatus(NULL, generic, estatus);
 801		llnode = llnode->next;
 802	}
 803}
 804
 805/* Save estatus for further processing in IRQ context */
 806static void __process_error(struct ghes *ghes)
 807{
 808#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 809	u32 len, node_len;
 810	struct ghes_estatus_node *estatus_node;
 811	struct acpi_hest_generic_status *estatus;
 812
 813	if (ghes_estatus_cached(ghes->estatus))
 814		return;
 815
 816	len = cper_estatus_len(ghes->estatus);
 817	node_len = GHES_ESTATUS_NODE_LEN(len);
 818
 819	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
 820	if (!estatus_node)
 821		return;
 822
 823	estatus_node->ghes = ghes;
 824	estatus_node->generic = ghes->generic;
 825	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 826	memcpy(estatus, ghes->estatus, len);
 827	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
 828#endif
 829}
 830
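    /*
     * Fatal error path used from NMI context: flush any queued records,
     * print this one at KERN_EMERG and panic, with a reboot timeout so
     * the error can be picked up from the next boot's logs.
     */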
 831static void __ghes_panic(struct ghes *ghes)
 832{
 833	oops_begin();
 834	ghes_print_queued_estatus();
 835	__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
 836
 837	/* reboot to log the error! */
 838	if (panic_timeout == 0)
 839		panic_timeout = ghes_panic_timeout;
 840	panic("Fatal hardware error!");
 841}
 842
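    /*
     * NMI handler.  Only one CPU at a time gets past ghes_in_nmi; it
     * scans all NMI-notified sources, panics immediately on a fatal
     * record, and, on architectures with NMI-safe cmpxchg, queues
     * anything else for ghes_proc_in_irq() before kicking the irq_work.
     */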
 843static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 844{
 845	struct ghes *ghes;
 846	int sev, ret = NMI_DONE;
 847
 848	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
 849		return ret;
 850
 851	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
 852		if (ghes_read_estatus(ghes, 1)) {
 853			ghes_clear_estatus(ghes);
 854			continue;
 855		}
 856
 857		sev = ghes_severity(ghes->estatus->error_severity);
 858		if (sev >= GHES_SEV_PANIC)
 859			__ghes_panic(ghes);
 860
 861		if (!(ghes->flags & GHES_TO_CLEAR))
 862			continue;
 863
 864		__process_error(ghes);
 865		ghes_clear_estatus(ghes);
 866
 867		ret = NMI_HANDLED;
 868	}
 869
 870#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 871	irq_work_queue(&ghes_proc_irq_work);
 872#endif
 873	atomic_dec(&ghes_in_nmi);
 874	return ret;
 875}
 876
 877static unsigned long ghes_esource_prealloc_size(
 878	const struct acpi_hest_generic *generic)
 879{
 880	unsigned long block_length, prealloc_records, prealloc_size;
 881
 882	block_length = min_t(unsigned long, generic->error_block_length,
 883			     GHES_ESTATUS_MAX_SIZE);
 884	prealloc_records = max_t(unsigned long,
 885				 generic->records_to_preallocate, 1);
 886	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
 887			      GHES_ESOURCE_PREALLOC_MAX_SIZE);
 888
 889	return prealloc_size;
 890}
 891
 892static void ghes_estatus_pool_shrink(unsigned long len)
 893{
 894	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
 895}
 896
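    /*
     * Hook an NMI-notified source in: grow the estatus pool by the
     * source's preallocation size (error_block_length multiplied by
     * records_to_preallocate, both clamped), and register the shared
     * "ghes" NMI handler when the first such source appears.
     */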
 897static void ghes_nmi_add(struct ghes *ghes)
 898{
 899	unsigned long len;
 900
 901	len = ghes_esource_prealloc_size(ghes->generic);
 902	ghes_estatus_pool_expand(len);
 903	mutex_lock(&ghes_list_mutex);
 904	if (list_empty(&ghes_nmi))
 905		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
 906	list_add_rcu(&ghes->list, &ghes_nmi);
 907	mutex_unlock(&ghes_list_mutex);
 908}
 909
 910static void ghes_nmi_remove(struct ghes *ghes)
 911{
 912	unsigned long len;
 913
 914	mutex_lock(&ghes_list_mutex);
 915	list_del_rcu(&ghes->list);
 916	if (list_empty(&ghes_nmi))
 917		unregister_nmi_handler(NMI_LOCAL, "ghes");
 918	mutex_unlock(&ghes_list_mutex);
 919	/*
 920	 * To synchronize with NMI handler, ghes can only be
 921	 * freed after NMI handler finishes.
 922	 */
 923	synchronize_rcu();
 924	len = ghes_esource_prealloc_size(ghes->generic);
 925	ghes_estatus_pool_shrink(len);
 926}
 927
 928static void ghes_nmi_init_cxt(void)
 929{
 930	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
 931}
 932#else /* CONFIG_HAVE_ACPI_APEI_NMI */
 933static inline void ghes_nmi_add(struct ghes *ghes)
 934{
 935	pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
 936	       ghes->generic->header.source_id);
 937	BUG();
 938}
 939
 940static inline void ghes_nmi_remove(struct ghes *ghes)
 941{
 942	pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
 943	       ghes->generic->header.source_id);
 944	BUG();
 945}
 946
 947static inline void ghes_nmi_init_cxt(void)
 948{
 949}
 950#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
 951
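    /*
     * Per-source probe, run for each enabled Generic Hardware Error
     * Source from the HEST: validate the notification type and the
     * error block length, allocate the struct ghes, register it with
     * ghes_edac, then wire up the polled timer, the GSI-based IRQ, the
     * SCI notifier or the NMI handler.
     */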
 952static int ghes_probe(struct platform_device *ghes_dev)
 953{
 954	struct acpi_hest_generic *generic;
 955	struct ghes *ghes = NULL;
 956
 957	int rc = -EINVAL;
 958
 959	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
 960	if (!generic->enabled)
 961		return -ENODEV;
 962
 963	switch (generic->notify.type) {
 964	case ACPI_HEST_NOTIFY_POLLED:
 965	case ACPI_HEST_NOTIFY_EXTERNAL:
 966	case ACPI_HEST_NOTIFY_SCI:
 967		break;
 968	case ACPI_HEST_NOTIFY_NMI:
 969		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
 970			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
 971				generic->header.source_id);
 972			goto err;
 973		}
 974		break;
 975	case ACPI_HEST_NOTIFY_LOCAL:
 976		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
 977			   generic->header.source_id);
 978		goto err;
 979	default:
 980		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
 981			   generic->notify.type, generic->header.source_id);
 982		goto err;
 983	}
 984
 985	rc = -EIO;
 986	if (generic->error_block_length <
 987	    sizeof(struct acpi_hest_generic_status)) {
 988		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
 989			   generic->error_block_length,
 990			   generic->header.source_id);
 991		goto err;
 992	}
 993	ghes = ghes_new(generic);
 994	if (IS_ERR(ghes)) {
 995		rc = PTR_ERR(ghes);
 996		ghes = NULL;
 997		goto err;
 998	}
 999
1000	rc = ghes_edac_register(ghes, &ghes_dev->dev);
1001	if (rc < 0)
1002		goto err;
1003
1004	switch (generic->notify.type) {
1005	case ACPI_HEST_NOTIFY_POLLED:
1006		ghes->timer.function = ghes_poll_func;
1007		ghes->timer.data = (unsigned long)ghes;
1008		init_timer_deferrable(&ghes->timer);
1009		ghes_add_timer(ghes);
1010		break;
1011	case ACPI_HEST_NOTIFY_EXTERNAL:
1012		/* External interrupt vector is GSI */
1013		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1014		if (rc) {
1015			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1016			       generic->header.source_id);
1017			goto err_edac_unreg;
1018		}
1019		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
1020		if (rc) {
1021			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1022			       generic->header.source_id);
1023			goto err_edac_unreg;
1024		}
1025		break;
1026	case ACPI_HEST_NOTIFY_SCI:
1027		mutex_lock(&ghes_list_mutex);
1028		if (list_empty(&ghes_sci))
1029			register_acpi_hed_notifier(&ghes_notifier_sci);
1030		list_add_rcu(&ghes->list, &ghes_sci);
1031		mutex_unlock(&ghes_list_mutex);
1032		break;
1033	case ACPI_HEST_NOTIFY_NMI:
1034		ghes_nmi_add(ghes);
1035		break;
1036	default:
1037		BUG();
1038	}
1039	platform_set_drvdata(ghes_dev, ghes);
1040
1041	return 0;
1042err_edac_unreg:
1043	ghes_edac_unregister(ghes);
1044err:
1045	if (ghes) {
1046		ghes_fini(ghes);
1047		kfree(ghes);
1048	}
1049	return rc;
1050}
1051
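    /*
     * Undo ghes_probe() for one source: flag it as exiting, detach
     * whichever notification mechanism was set up for it, then clean up
     * via ghes_fini(), drop the EDAC registration and free the struct
     * ghes.
     */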
1052static int ghes_remove(struct platform_device *ghes_dev)
1053{
1054	struct ghes *ghes;
1055	struct acpi_hest_generic *generic;
1056
1057	ghes = platform_get_drvdata(ghes_dev);
1058	generic = ghes->generic;
1059
1060	ghes->flags |= GHES_EXITING;
1061	switch (generic->notify.type) {
1062	case ACPI_HEST_NOTIFY_POLLED:
1063		del_timer_sync(&ghes->timer);
1064		break;
1065	case ACPI_HEST_NOTIFY_EXTERNAL:
1066		free_irq(ghes->irq, ghes);
1067		break;
1068	case ACPI_HEST_NOTIFY_SCI:
1069		mutex_lock(&ghes_list_mutex);
1070		list_del_rcu(&ghes->list);
1071		if (list_empty(&ghes_sci))
1072			unregister_acpi_hed_notifier(&ghes_notifier_sci);
1073		mutex_unlock(&ghes_list_mutex);
1074		break;
1075	case ACPI_HEST_NOTIFY_NMI:
1076		ghes_nmi_remove(ghes);
1077		break;
1078	default:
1079		BUG();
1080		break;
1081	}
1082
1083	ghes_fini(ghes);
1084
1085	ghes_edac_unregister(ghes);
1086
1087	kfree(ghes);
1088
1089	platform_set_drvdata(ghes_dev, NULL);
1090
1091	return 0;
1092}
1093
1094static struct platform_driver ghes_platform_driver = {
1095	.driver		= {
1096		.name	= "GHES",
1097	},
1098	.probe		= ghes_probe,
1099	.remove		= ghes_remove,
1100};
1101
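    /*
     * Module init: bail out early if ACPI, HEST or GHES is disabled, set
     * up the NMI irq_work context, the ioremap areas and the estatus
     * pool, register the platform driver that binds the individual error
     * sources, and report how firmware-first mode was negotiated via
     * _OSC.
     */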
1102static int __init ghes_init(void)
1103{
1104	int rc;
1105
1106	if (acpi_disabled)
1107		return -ENODEV;
1108
1109	if (hest_disable) {
1110		pr_info(GHES_PFX "HEST is not enabled!\n");
1111		return -EINVAL;
1112	}
1113
1114	if (ghes_disable) {
1115		pr_info(GHES_PFX "GHES is not enabled!\n");
1116		return -EINVAL;
1117	}
1118
1119	ghes_nmi_init_cxt();
1120
1121	rc = ghes_ioremap_init();
1122	if (rc)
1123		goto err;
1124
1125	rc = ghes_estatus_pool_init();
1126	if (rc)
1127		goto err_ioremap_exit;
1128
1129	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
1130				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
1131	if (rc)
1132		goto err_pool_exit;
1133
1134	rc = platform_driver_register(&ghes_platform_driver);
1135	if (rc)
1136		goto err_pool_exit;
1137
1138	rc = apei_osc_setup();
1139	if (rc == 0 && osc_sb_apei_support_acked)
1140		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1141	else if (rc == 0 && !osc_sb_apei_support_acked)
1142		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1143	else if (rc && osc_sb_apei_support_acked)
1144		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1145	else
1146		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1147
1148	return 0;
1149err_pool_exit:
1150	ghes_estatus_pool_exit();
1151err_ioremap_exit:
1152	ghes_ioremap_exit();
1153err:
1154	return rc;
1155}
1156device_initcall(ghes_init);