1/*
2 * APEI Generic Hardware Error Source support
3 *
4 * Generic Hardware Error Source provides a way to report platform
5 * hardware errors (such as those from the chipset). It works in
6 * so-called "Firmware First" mode: hardware errors are reported to
7 * firmware first, then forwarded to Linux by the firmware. This way,
8 * firmware can check non-standard hardware error registers or
9 * non-standard hardware links to produce more detailed hardware error
10 * information for Linux.
11 *
12 * For more information about Generic Hardware Error Source, please
13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 *
15 * Copyright 2010,2011 Intel Corp.
16 * Author: Huang Ying <ying.huang@intel.com>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License version
20 * 2 as published by the Free Software Foundation;
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 */
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/acpi.h>
36#include <linux/acpi_io.h>
37#include <linux/io.h>
38#include <linux/interrupt.h>
39#include <linux/timer.h>
40#include <linux/cper.h>
41#include <linux/kdebug.h>
42#include <linux/platform_device.h>
43#include <linux/mutex.h>
44#include <linux/ratelimit.h>
45#include <linux/vmalloc.h>
46#include <linux/irq_work.h>
47#include <linux/llist.h>
48#include <linux/genalloc.h>
49#include <linux/pci.h>
50#include <linux/aer.h>
51#include <acpi/apei.h>
52#include <acpi/hed.h>
53#include <asm/mce.h>
54#include <asm/tlbflush.h>
55#include <asm/nmi.h>
56
57#include "apei-internal.h"
58
59#define GHES_PFX "GHES: "
60
61#define GHES_ESTATUS_MAX_SIZE 65536
62#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
63
64#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
65
66/* This is just an estimation for memory pool allocation */
67#define GHES_ESTATUS_CACHE_AVG_SIZE 512
68
69#define GHES_ESTATUS_CACHES_SIZE 4
70
71#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
72/* Prevent too many caches from being allocated because of RCU */
73#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
74
75#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
76 (sizeof(struct ghes_estatus_cache) + (estatus_len))
77#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
78 ((struct acpi_hest_generic_status *) \
79 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
80
81#define GHES_ESTATUS_NODE_LEN(estatus_len) \
82 (sizeof(struct ghes_estatus_node) + (estatus_len))
83#define GHES_ESTATUS_FROM_NODE(estatus_node) \
84 ((struct acpi_hest_generic_status *) \
85 ((struct ghes_estatus_node *)(estatus_node) + 1))
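
/*
 * Both the cache and the node objects are allocated as a small header
 * followed immediately by the raw error status record; the *_FROM_*
 * macros recover the payload by stepping one header structure past the
 * base pointer. A minimal sketch (illustrative only, mirroring what
 * ghes_estatus_cache_alloc() below does):
 *
 *	len = apei_estatus_len(estatus);
 *	cache = (void *)gen_pool_alloc(ghes_estatus_pool,
 *				       GHES_ESTATUS_CACHE_LEN(len));
 *	if (cache)
 *		memcpy(GHES_ESTATUS_FROM_CACHE(cache), estatus, len);
 */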
86
87/*
88 * One struct ghes is created for each generic hardware error source.
89 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
90 * handler.
91 *
92 * estatus: memory buffer for error status block, allocated during
93 * HEST parsing.
94 */
95#define GHES_TO_CLEAR 0x0001
96#define GHES_EXITING 0x0002
97
98struct ghes {
99 struct acpi_hest_generic *generic;
100 struct acpi_hest_generic_status *estatus;
101 u64 buffer_paddr;
102 unsigned long flags;
103 union {
104 struct list_head list;
105 struct timer_list timer;
106 unsigned int irq;
107 };
108};
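
/*
 * Exactly one member of the union above is used, selected by the
 * notification type declared in the HEST entry: "list" for sources on
 * the SCI or NMI lists, "timer" for ACPI_HEST_NOTIFY_POLLED, and "irq"
 * for ACPI_HEST_NOTIFY_EXTERNAL (see ghes_probe() below).
 */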
109
110struct ghes_estatus_node {
111 struct llist_node llnode;
112 struct acpi_hest_generic *generic;
113};
114
115struct ghes_estatus_cache {
116 u32 estatus_len;
117 atomic_t count;
118 struct acpi_hest_generic *generic;
119 unsigned long long time_in;
120 struct rcu_head rcu;
121};
122
123bool ghes_disable;
124module_param_named(disable, ghes_disable, bool, 0);
125
126static int ghes_panic_timeout __read_mostly = 30;
127
128/*
129 * All error sources notified with SCI share one notifier function,
130 * so they need to be linked and checked one by one. The same applies
131 * to NMI.
132 *
133 * RCU is used for these lists, so ghes_list_mutex is only used for
134 * list changing, not for traversing.
135 */
136static LIST_HEAD(ghes_sci);
137static LIST_HEAD(ghes_nmi);
138static DEFINE_MUTEX(ghes_list_mutex);
139
140/*
141 * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
142 * mutual exclusion.
143 */
144static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
145
146/*
147 * The memory area used to transfer hardware error information from
148 * BIOS to Linux can be determined only in the NMI, IRQ or timer
149 * handler, but the general ioremap can not be used in atomic context,
150 * so a special version of atomic ioremap is implemented for that.
151 */
152
153/*
154 * Two virtual pages are used, one for NMI context, the other for
155 * IRQ/PROCESS context
156 */
157#define GHES_IOREMAP_PAGES 2
158#define GHES_IOREMAP_NMI_PAGE(base) (base)
159#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE)
160
161/* virtual memory area for atomic ioremap */
162static struct vm_struct *ghes_ioremap_area;
163/*
164 * These two spinlocks are used to prevent the atomic ioremap virtual
165 * memory areas from being mapped simultaneously.
166 */
167static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
168static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
169
170/*
171 * printk is not safe in NMI context, so in the NMI handler we allocate
172 * the required memory from a lock-less memory allocator
173 * (ghes_estatus_pool), save the estatus into it, put it on a lock-less
174 * list (ghes_estatus_llist), and then defer the printk to IRQ context
175 * via irq_work (ghes_proc_irq_work). ghes_estatus_pool_size_request
176 * records the pool size required by all NMI error sources.
177 */
178static struct gen_pool *ghes_estatus_pool;
179static unsigned long ghes_estatus_pool_size_request;
180static struct llist_head ghes_estatus_llist;
181static struct irq_work ghes_proc_irq_work;
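
/*
 * The resulting producer/consumer pattern, in outline (illustrative
 * only; the real implementations are ghes_notify_nmi() and
 * ghes_proc_in_irq() below):
 *
 *	NMI context - only lock-less primitives, no printk:
 *		node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
 *		if (node) {
 *			memcpy(GHES_ESTATUS_FROM_NODE(node), ghes->estatus, len);
 *			llist_add(&node->llnode, &ghes_estatus_llist);
 *		}
 *		irq_work_queue(&ghes_proc_irq_work);
 *
 *	IRQ context (irq_work handler) - printk and freeing are safe:
 *		llnode = llist_del_all(&ghes_estatus_llist);
 *		while (llnode) {
 *			next = llnode->next;
 *			node = llist_entry(llnode, struct ghes_estatus_node, llnode);
 *			...print and handle GHES_ESTATUS_FROM_NODE(node)...
 *			gen_pool_free(ghes_estatus_pool, (unsigned long)node, node_len);
 *			llnode = next;
 *		}
 */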
182
183struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
184static atomic_t ghes_estatus_cache_alloced;
185
186static int ghes_ioremap_init(void)
187{
188 ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
189 VM_IOREMAP, VMALLOC_START, VMALLOC_END);
190 if (!ghes_ioremap_area) {
191 pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
192 return -ENOMEM;
193 }
194
195 return 0;
196}
197
198static void ghes_ioremap_exit(void)
199{
200 free_vm_area(ghes_ioremap_area);
201}
202
203static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
204{
205 unsigned long vaddr;
206
207 vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
208 ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
209 pfn << PAGE_SHIFT, PAGE_KERNEL);
210
211 return (void __iomem *)vaddr;
212}
213
214static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
215{
216 unsigned long vaddr;
217
218 vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
219 ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
220 pfn << PAGE_SHIFT, PAGE_KERNEL);
221
222 return (void __iomem *)vaddr;
223}
224
225static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
226{
227 unsigned long vaddr = (unsigned long __force)vaddr_ptr;
228 void *base = ghes_ioremap_area->addr;
229
230 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
231 unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
232 __flush_tlb_one(vaddr);
233}
234
235static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
236{
237 unsigned long vaddr = (unsigned long __force)vaddr_ptr;
238 void *base = ghes_ioremap_area->addr;
239
240 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
241 unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
242 __flush_tlb_one(vaddr);
243}
244
245static int ghes_estatus_pool_init(void)
246{
247 ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
248 if (!ghes_estatus_pool)
249 return -ENOMEM;
250 return 0;
251}
252
253static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
254 struct gen_pool_chunk *chunk,
255 void *data)
256{
257 free_page(chunk->start_addr);
258}
259
260static void ghes_estatus_pool_exit(void)
261{
262 gen_pool_for_each_chunk(ghes_estatus_pool,
263 ghes_estatus_pool_free_chunk_page, NULL);
264 gen_pool_destroy(ghes_estatus_pool);
265}
266
267static int ghes_estatus_pool_expand(unsigned long len)
268{
269 unsigned long i, pages, size, addr;
270 int ret;
271
272 ghes_estatus_pool_size_request += PAGE_ALIGN(len);
273 size = gen_pool_size(ghes_estatus_pool);
274 if (size >= ghes_estatus_pool_size_request)
275 return 0;
276 pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
277 for (i = 0; i < pages; i++) {
278 addr = __get_free_page(GFP_KERNEL);
279 if (!addr)
280 return -ENOMEM;
281 ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
282 if (ret)
283 return ret;
284 }
285
286 return 0;
287}
288
289static void ghes_estatus_pool_shrink(unsigned long len)
290{
291 ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
292}
293
294static struct ghes *ghes_new(struct acpi_hest_generic *generic)
295{
296 struct ghes *ghes;
297 unsigned int error_block_length;
298 int rc;
299
300 ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
301 if (!ghes)
302 return ERR_PTR(-ENOMEM);
303 ghes->generic = generic;
304 rc = apei_map_generic_address(&generic->error_status_address);
305 if (rc)
306 goto err_free;
307 error_block_length = generic->error_block_length;
308 if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
309 pr_warning(FW_WARN GHES_PFX
310 "Error status block length is too long: %u for "
311 "generic hardware error source: %d.\n",
312 error_block_length, generic->header.source_id);
313 error_block_length = GHES_ESTATUS_MAX_SIZE;
314 }
315 ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
316 if (!ghes->estatus) {
317 rc = -ENOMEM;
318 goto err_unmap;
319 }
320
321 return ghes;
322
323err_unmap:
324 apei_unmap_generic_address(&generic->error_status_address);
325err_free:
326 kfree(ghes);
327 return ERR_PTR(rc);
328}
329
330static void ghes_fini(struct ghes *ghes)
331{
332 kfree(ghes->estatus);
333 apei_unmap_generic_address(&ghes->generic->error_status_address);
334}
335
336enum {
337 GHES_SEV_NO = 0x0,
338 GHES_SEV_CORRECTED = 0x1,
339 GHES_SEV_RECOVERABLE = 0x2,
340 GHES_SEV_PANIC = 0x3,
341};
342
343static inline int ghes_severity(int severity)
344{
345 switch (severity) {
346 case CPER_SEV_INFORMATIONAL:
347 return GHES_SEV_NO;
348 case CPER_SEV_CORRECTED:
349 return GHES_SEV_CORRECTED;
350 case CPER_SEV_RECOVERABLE:
351 return GHES_SEV_RECOVERABLE;
352 case CPER_SEV_FATAL:
353 return GHES_SEV_PANIC;
354 default:
355 /* Unknown, go panic */
356 return GHES_SEV_PANIC;
357 }
358}
359
360static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
361 int from_phys)
362{
363 void __iomem *vaddr;
364 unsigned long flags = 0;
365 int in_nmi = in_nmi();
366 u64 offset;
367 u32 trunk;
368
369 while (len > 0) {
370 offset = paddr - (paddr & PAGE_MASK);
371 if (in_nmi) {
372 raw_spin_lock(&ghes_ioremap_lock_nmi);
373 vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
374 } else {
375 spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
376 vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
377 }
378 trunk = PAGE_SIZE - offset;
379 trunk = min(trunk, len);
380 if (from_phys)
381 memcpy_fromio(buffer, vaddr + offset, trunk);
382 else
383 memcpy_toio(vaddr + offset, buffer, trunk);
384 len -= trunk;
385 paddr += trunk;
386 buffer += trunk;
387 if (in_nmi) {
388 ghes_iounmap_nmi(vaddr);
389 raw_spin_unlock(&ghes_ioremap_lock_nmi);
390 } else {
391 ghes_iounmap_irq(vaddr);
392 spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
393 }
394 }
395}
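
/*
 * Worked example for the chunking above, assuming 4KiB pages: for
 * paddr = 0x12345FF8 and len = 16, the first pass maps the page at
 * 0x12345000 and copies trunk = PAGE_SIZE - 0xFF8 = 8 bytes, then the
 * second pass maps 0x12346000 and copies the remaining 8 bytes from
 * offset 0.
 */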
396
397static int ghes_read_estatus(struct ghes *ghes, int silent)
398{
399 struct acpi_hest_generic *g = ghes->generic;
400 u64 buf_paddr;
401 u32 len;
402 int rc;
403
404 rc = apei_read(&buf_paddr, &g->error_status_address);
405 if (rc) {
406 if (!silent && printk_ratelimit())
407 pr_warning(FW_WARN GHES_PFX
408"Failed to read error status block address for hardware error source: %d.\n",
409 g->header.source_id);
410 return -EIO;
411 }
412 if (!buf_paddr)
413 return -ENOENT;
414
415 ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
416 sizeof(*ghes->estatus), 1);
417 if (!ghes->estatus->block_status)
418 return -ENOENT;
419
420 ghes->buffer_paddr = buf_paddr;
421 ghes->flags |= GHES_TO_CLEAR;
422
423 rc = -EIO;
424 len = apei_estatus_len(ghes->estatus);
425 if (len < sizeof(*ghes->estatus))
426 goto err_read_block;
427 if (len > ghes->generic->error_block_length)
428 goto err_read_block;
429 if (apei_estatus_check_header(ghes->estatus))
430 goto err_read_block;
431 ghes_copy_tofrom_phys(ghes->estatus + 1,
432 buf_paddr + sizeof(*ghes->estatus),
433 len - sizeof(*ghes->estatus), 1);
434 if (apei_estatus_check(ghes->estatus))
435 goto err_read_block;
436 rc = 0;
437
438err_read_block:
439 if (rc && !silent && printk_ratelimit())
440 pr_warning(FW_WARN GHES_PFX
441 "Failed to read error status block!\n");
442 return rc;
443}
444
445static void ghes_clear_estatus(struct ghes *ghes)
446{
447 ghes->estatus->block_status = 0;
448 if (!(ghes->flags & GHES_TO_CLEAR))
449 return;
450 ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
451 sizeof(ghes->estatus->block_status), 0);
452 ghes->flags &= ~GHES_TO_CLEAR;
453}
454
455static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
456{
457 int sev, sec_sev;
458 struct acpi_hest_generic_data *gdata;
459
460 sev = ghes_severity(estatus->error_severity);
461 apei_estatus_for_each_section(estatus, gdata) {
462 sec_sev = ghes_severity(gdata->error_severity);
463 if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
464 CPER_SEC_PLATFORM_MEM)) {
465 struct cper_sec_mem_err *mem_err;
466 mem_err = (struct cper_sec_mem_err *)(gdata+1);
467#ifdef CONFIG_X86_MCE
468 apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
469 mem_err);
470#endif
471#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
472 if (sev == GHES_SEV_RECOVERABLE &&
473 sec_sev == GHES_SEV_RECOVERABLE &&
474 mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
475 unsigned long pfn;
476 pfn = mem_err->physical_addr >> PAGE_SHIFT;
477 memory_failure_queue(pfn, 0, 0);
478 }
479#endif
480 }
481#ifdef CONFIG_ACPI_APEI_PCIEAER
482 else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
483 CPER_SEC_PCIE)) {
484 struct cper_sec_pcie *pcie_err;
485 pcie_err = (struct cper_sec_pcie *)(gdata+1);
486 if (sev == GHES_SEV_RECOVERABLE &&
487 sec_sev == GHES_SEV_RECOVERABLE &&
488 pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
489 pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
490 unsigned int devfn;
491 int aer_severity;
492 devfn = PCI_DEVFN(pcie_err->device_id.device,
493 pcie_err->device_id.function);
494 aer_severity = cper_severity_to_aer(sev);
495 aer_recover_queue(pcie_err->device_id.segment,
496 pcie_err->device_id.bus,
497 devfn, aer_severity);
498 }
499
500 }
501#endif
502 }
503}
504
505static void __ghes_print_estatus(const char *pfx,
506 const struct acpi_hest_generic *generic,
507 const struct acpi_hest_generic_status *estatus)
508{
509 static atomic_t seqno;
510 unsigned int curr_seqno;
511 char pfx_seq[64];
512
513 if (pfx == NULL) {
514 if (ghes_severity(estatus->error_severity) <=
515 GHES_SEV_CORRECTED)
516 pfx = KERN_WARNING;
517 else
518 pfx = KERN_ERR;
519 }
520 curr_seqno = atomic_inc_return(&seqno);
521 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
522 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
523 pfx_seq, generic->header.source_id);
524 apei_estatus_print(pfx_seq, estatus);
525}
526
527static int ghes_print_estatus(const char *pfx,
528 const struct acpi_hest_generic *generic,
529 const struct acpi_hest_generic_status *estatus)
530{
531 /* Not more than 2 messages every 5 seconds */
532 static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
533 static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
534 struct ratelimit_state *ratelimit;
535
536 if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
537 ratelimit = &ratelimit_corrected;
538 else
539 ratelimit = &ratelimit_uncorrected;
540 if (__ratelimit(ratelimit)) {
541 __ghes_print_estatus(pfx, generic, estatus);
542 return 1;
543 }
544 return 0;
545}
546
547/*
548 * GHES error status reporting throttle, to report more kinds of
549 * errors instead of just the most frequently occurring ones.
550 */
551static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
552{
553 u32 len;
554 int i, cached = 0;
555 unsigned long long now;
556 struct ghes_estatus_cache *cache;
557 struct acpi_hest_generic_status *cache_estatus;
558
559 len = apei_estatus_len(estatus);
560 rcu_read_lock();
561 for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
562 cache = rcu_dereference(ghes_estatus_caches[i]);
563 if (cache == NULL)
564 continue;
565 if (len != cache->estatus_len)
566 continue;
567 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
568 if (memcmp(estatus, cache_estatus, len))
569 continue;
570 atomic_inc(&cache->count);
571 now = sched_clock();
572 if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
573 cached = 1;
574 break;
575 }
576 rcu_read_unlock();
577 return cached;
578}
579
580static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
581 struct acpi_hest_generic *generic,
582 struct acpi_hest_generic_status *estatus)
583{
584 int alloced;
585 u32 len, cache_len;
586 struct ghes_estatus_cache *cache;
587 struct acpi_hest_generic_status *cache_estatus;
588
589 alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
590 if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
591 atomic_dec(&ghes_estatus_cache_alloced);
592 return NULL;
593 }
594 len = apei_estatus_len(estatus);
595 cache_len = GHES_ESTATUS_CACHE_LEN(len);
596 cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
597 if (!cache) {
598 atomic_dec(&ghes_estatus_cache_alloced);
599 return NULL;
600 }
601 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
602 memcpy(cache_estatus, estatus, len);
603 cache->estatus_len = len;
604 atomic_set(&cache->count, 0);
605 cache->generic = generic;
606 cache->time_in = sched_clock();
607 return cache;
608}
609
610static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
611{
612 u32 len;
613
614 len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
615 len = GHES_ESTATUS_CACHE_LEN(len);
616 gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
617 atomic_dec(&ghes_estatus_cache_alloced);
618}
619
620static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
621{
622 struct ghes_estatus_cache *cache;
623
624 cache = container_of(head, struct ghes_estatus_cache, rcu);
625 ghes_estatus_cache_free(cache);
626}
627
628static void ghes_estatus_cache_add(
629 struct acpi_hest_generic *generic,
630 struct acpi_hest_generic_status *estatus)
631{
632 int i, slot = -1, count;
633 unsigned long long now, duration, period, max_period = 0;
634 struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
635
636 new_cache = ghes_estatus_cache_alloc(generic, estatus);
637 if (new_cache == NULL)
638 return;
639 rcu_read_lock();
640 now = sched_clock();
641 for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
642 cache = rcu_dereference(ghes_estatus_caches[i]);
643 if (cache == NULL) {
644 slot = i;
645 slot_cache = NULL;
646 break;
647 }
648 duration = now - cache->time_in;
649 if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
650 slot = i;
651 slot_cache = cache;
652 break;
653 }
654 count = atomic_read(&cache->count);
655 period = duration;
656 do_div(period, (count + 1));
657 if (period > max_period) {
658 max_period = period;
659 slot = i;
660 slot_cache = cache;
661 }
662 }
663 /* new_cache must be put into array after its contents are written */
664 smp_wmb();
665 if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
666 slot_cache, new_cache) == slot_cache) {
667 if (slot_cache)
668 call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
669 } else
670 ghes_estatus_cache_free(new_cache);
671 rcu_read_unlock();
672}
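
/*
 * The slot selection above favors evicting the entry with the longest
 * average interval between hits. Worked example: a cached entry that is
 * 6 seconds old with count = 2 has period = 6s / 3 = 2s, while one that
 * is 4 seconds old with count = 0 has period = 4s / 1 = 4s; the latter
 * is considered colder and is replaced first (entries older than
 * GHES_ESTATUS_IN_CACHE_MAX_NSEC are evicted outright).
 */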
673
674static int ghes_proc(struct ghes *ghes)
675{
676 int rc;
677
678 rc = ghes_read_estatus(ghes, 0);
679 if (rc)
680 goto out;
681 if (!ghes_estatus_cached(ghes->estatus)) {
682 if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
683 ghes_estatus_cache_add(ghes->generic, ghes->estatus);
684 }
685 ghes_do_proc(ghes->estatus);
686out:
687 ghes_clear_estatus(ghes);
688 return 0;
689}
690
691static void ghes_add_timer(struct ghes *ghes)
692{
693 struct acpi_hest_generic *g = ghes->generic;
694 unsigned long expire;
695
696 if (!g->notify.poll_interval) {
697 pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
698 g->header.source_id);
699 return;
700 }
701 expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
702 ghes->timer.expires = round_jiffies_relative(expire);
703 add_timer(&ghes->timer);
704}
705
706static void ghes_poll_func(unsigned long data)
707{
708 struct ghes *ghes = (void *)data;
709
710 ghes_proc(ghes);
711 if (!(ghes->flags & GHES_EXITING))
712 ghes_add_timer(ghes);
713}
714
715static irqreturn_t ghes_irq_func(int irq, void *data)
716{
717 struct ghes *ghes = data;
718 int rc;
719
720 rc = ghes_proc(ghes);
721 if (rc)
722 return IRQ_NONE;
723
724 return IRQ_HANDLED;
725}
726
727static int ghes_notify_sci(struct notifier_block *this,
728 unsigned long event, void *data)
729{
730 struct ghes *ghes;
731 int ret = NOTIFY_DONE;
732
733 rcu_read_lock();
734 list_for_each_entry_rcu(ghes, &ghes_sci, list) {
735 if (!ghes_proc(ghes))
736 ret = NOTIFY_OK;
737 }
738 rcu_read_unlock();
739
740 return ret;
741}
742
743static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
744{
745 struct llist_node *next, *tail = NULL;
746
747 while (llnode) {
748 next = llnode->next;
749 llnode->next = tail;
750 tail = llnode;
751 llnode = next;
752 }
753
754 return tail;
755}
756
757static void ghes_proc_in_irq(struct irq_work *irq_work)
758{
759 struct llist_node *llnode, *next;
760 struct ghes_estatus_node *estatus_node;
761 struct acpi_hest_generic *generic;
762 struct acpi_hest_generic_status *estatus;
763 u32 len, node_len;
764
765 llnode = llist_del_all(&ghes_estatus_llist);
766 /*
767 * The estatus entries on the list are in reverse time order,
768 * so restore the proper order first.
769 */
770 llnode = llist_nodes_reverse(llnode);
771 while (llnode) {
772 next = llnode->next;
773 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
774 llnode);
775 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
776 len = apei_estatus_len(estatus);
777 node_len = GHES_ESTATUS_NODE_LEN(len);
778 ghes_do_proc(estatus);
779 if (!ghes_estatus_cached(estatus)) {
780 generic = estatus_node->generic;
781 if (ghes_print_estatus(NULL, generic, estatus))
782 ghes_estatus_cache_add(generic, estatus);
783 }
784 gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
785 node_len);
786 llnode = next;
787 }
788}
789
790static void ghes_print_queued_estatus(void)
791{
792 struct llist_node *llnode;
793 struct ghes_estatus_node *estatus_node;
794 struct acpi_hest_generic *generic;
795 struct acpi_hest_generic_status *estatus;
796 u32 len, node_len;
797
798 llnode = llist_del_all(&ghes_estatus_llist);
799 /*
800 * The estatus entries on the list are in reverse time order,
801 * so restore the proper order first.
802 */
803 llnode = llist_nodes_reverse(llnode);
804 while (llnode) {
805 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
806 llnode);
807 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
808 len = apei_estatus_len(estatus);
809 node_len = GHES_ESTATUS_NODE_LEN(len);
810 generic = estatus_node->generic;
811 ghes_print_estatus(NULL, generic, estatus);
812 llnode = llnode->next;
813 }
814}
815
816static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
817{
818 struct ghes *ghes, *ghes_global = NULL;
819 int sev, sev_global = -1;
820 int ret = NMI_DONE;
821
822 raw_spin_lock(&ghes_nmi_lock);
823 list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
824 if (ghes_read_estatus(ghes, 1)) {
825 ghes_clear_estatus(ghes);
826 continue;
827 }
828 sev = ghes_severity(ghes->estatus->error_severity);
829 if (sev > sev_global) {
830 sev_global = sev;
831 ghes_global = ghes;
832 }
833 ret = NMI_HANDLED;
834 }
835
836 if (ret == NMI_DONE)
837 goto out;
838
839 if (sev_global >= GHES_SEV_PANIC) {
840 oops_begin();
841 ghes_print_queued_estatus();
842 __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
843 ghes_global->estatus);
844 /* reboot to log the error! */
845 if (panic_timeout == 0)
846 panic_timeout = ghes_panic_timeout;
847 panic("Fatal hardware error!");
848 }
849
850 list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
851#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
852 u32 len, node_len;
853 struct ghes_estatus_node *estatus_node;
854 struct acpi_hest_generic_status *estatus;
855#endif
856 if (!(ghes->flags & GHES_TO_CLEAR))
857 continue;
858#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
859 if (ghes_estatus_cached(ghes->estatus))
860 goto next;
861 /* Save estatus for further processing in IRQ context */
862 len = apei_estatus_len(ghes->estatus);
863 node_len = GHES_ESTATUS_NODE_LEN(len);
864 estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
865 node_len);
866 if (estatus_node) {
867 estatus_node->generic = ghes->generic;
868 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
869 memcpy(estatus, ghes->estatus, len);
870 llist_add(&estatus_node->llnode, &ghes_estatus_llist);
871 }
872next:
873#endif
874 ghes_clear_estatus(ghes);
875 }
876#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
877 irq_work_queue(&ghes_proc_irq_work);
878#endif
879
880out:
881 raw_spin_unlock(&ghes_nmi_lock);
882 return ret;
883}
884
885static struct notifier_block ghes_notifier_sci = {
886 .notifier_call = ghes_notify_sci,
887};
888
889static unsigned long ghes_esource_prealloc_size(
890 const struct acpi_hest_generic *generic)
891{
892 unsigned long block_length, prealloc_records, prealloc_size;
893
894 block_length = min_t(unsigned long, generic->error_block_length,
895 GHES_ESTATUS_MAX_SIZE);
896 prealloc_records = max_t(unsigned long,
897 generic->records_to_preallocate, 1);
898 prealloc_size = min_t(unsigned long, block_length * prealloc_records,
899 GHES_ESOURCE_PREALLOC_MAX_SIZE);
900
901 return prealloc_size;
902}
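
/*
 * Worked example: for an error source with error_block_length = 4096
 * and records_to_preallocate = 4, the NMI pool is grown by
 * min(4096 * 4, GHES_ESOURCE_PREALLOC_MAX_SIZE) = 16384 bytes.
 */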
903
904static int __devinit ghes_probe(struct platform_device *ghes_dev)
905{
906 struct acpi_hest_generic *generic;
907 struct ghes *ghes = NULL;
908 unsigned long len;
909 int rc = -EINVAL;
910
911 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
912 if (!generic->enabled)
913 return -ENODEV;
914
915 switch (generic->notify.type) {
916 case ACPI_HEST_NOTIFY_POLLED:
917 case ACPI_HEST_NOTIFY_EXTERNAL:
918 case ACPI_HEST_NOTIFY_SCI:
919 case ACPI_HEST_NOTIFY_NMI:
920 break;
921 case ACPI_HEST_NOTIFY_LOCAL:
922 pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
923 generic->header.source_id);
924 goto err;
925 default:
926 pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
927 generic->notify.type, generic->header.source_id);
928 goto err;
929 }
930
931 rc = -EIO;
932 if (generic->error_block_length <
933 sizeof(struct acpi_hest_generic_status)) {
934 pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
935 generic->error_block_length,
936 generic->header.source_id);
937 goto err;
938 }
939 ghes = ghes_new(generic);
940 if (IS_ERR(ghes)) {
941 rc = PTR_ERR(ghes);
942 ghes = NULL;
943 goto err;
944 }
945 switch (generic->notify.type) {
946 case ACPI_HEST_NOTIFY_POLLED:
947 ghes->timer.function = ghes_poll_func;
948 ghes->timer.data = (unsigned long)ghes;
949 init_timer_deferrable(&ghes->timer);
950 ghes_add_timer(ghes);
951 break;
952 case ACPI_HEST_NOTIFY_EXTERNAL:
953 /* External interrupt vector is GSI */
954 if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
955 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
956 generic->header.source_id);
957 goto err;
958 }
959 if (request_irq(ghes->irq, ghes_irq_func,
960 0, "GHES IRQ", ghes)) {
961 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
962 generic->header.source_id);
963 goto err;
964 }
965 break;
966 case ACPI_HEST_NOTIFY_SCI:
967 mutex_lock(&ghes_list_mutex);
968 if (list_empty(&ghes_sci))
969 register_acpi_hed_notifier(&ghes_notifier_sci);
970 list_add_rcu(&ghes->list, &ghes_sci);
971 mutex_unlock(&ghes_list_mutex);
972 break;
973 case ACPI_HEST_NOTIFY_NMI:
974 len = ghes_esource_prealloc_size(generic);
975 ghes_estatus_pool_expand(len);
976 mutex_lock(&ghes_list_mutex);
977 if (list_empty(&ghes_nmi))
978 register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
979 "ghes");
980 list_add_rcu(&ghes->list, &ghes_nmi);
981 mutex_unlock(&ghes_list_mutex);
982 break;
983 default:
984 BUG();
985 }
986 platform_set_drvdata(ghes_dev, ghes);
987
988 return 0;
989err:
990 if (ghes) {
991 ghes_fini(ghes);
992 kfree(ghes);
993 }
994 return rc;
995}
996
997static int __devexit ghes_remove(struct platform_device *ghes_dev)
998{
999 struct ghes *ghes;
1000 struct acpi_hest_generic *generic;
1001 unsigned long len;
1002
1003 ghes = platform_get_drvdata(ghes_dev);
1004 generic = ghes->generic;
1005
1006 ghes->flags |= GHES_EXITING;
1007 switch (generic->notify.type) {
1008 case ACPI_HEST_NOTIFY_POLLED:
1009 del_timer_sync(&ghes->timer);
1010 break;
1011 case ACPI_HEST_NOTIFY_EXTERNAL:
1012 free_irq(ghes->irq, ghes);
1013 break;
1014 case ACPI_HEST_NOTIFY_SCI:
1015 mutex_lock(&ghes_list_mutex);
1016 list_del_rcu(&ghes->list);
1017 if (list_empty(&ghes_sci))
1018 unregister_acpi_hed_notifier(&ghes_notifier_sci);
1019 mutex_unlock(&ghes_list_mutex);
1020 break;
1021 case ACPI_HEST_NOTIFY_NMI:
1022 mutex_lock(&ghes_list_mutex);
1023 list_del_rcu(&ghes->list);
1024 if (list_empty(&ghes_nmi))
1025 unregister_nmi_handler(NMI_LOCAL, "ghes");
1026 mutex_unlock(&ghes_list_mutex);
1027 /*
1028 * To synchronize with NMI handler, ghes can only be
1029 * freed after NMI handler finishes.
1030 */
1031 synchronize_rcu();
1032 len = ghes_esource_prealloc_size(generic);
1033 ghes_estatus_pool_shrink(len);
1034 break;
1035 default:
1036 BUG();
1037 break;
1038 }
1039
1040 ghes_fini(ghes);
1041 kfree(ghes);
1042
1043 platform_set_drvdata(ghes_dev, NULL);
1044
1045 return 0;
1046}
1047
1048static struct platform_driver ghes_platform_driver = {
1049 .driver = {
1050 .name = "GHES",
1051 .owner = THIS_MODULE,
1052 },
1053 .probe = ghes_probe,
1054 .remove = ghes_remove,
1055};
1056
1057static int __init ghes_init(void)
1058{
1059 int rc;
1060
1061 if (acpi_disabled)
1062 return -ENODEV;
1063
1064 if (hest_disable) {
1065 pr_info(GHES_PFX "HEST is not enabled!\n");
1066 return -EINVAL;
1067 }
1068
1069 if (ghes_disable) {
1070 pr_info(GHES_PFX "GHES is not enabled!\n");
1071 return -EINVAL;
1072 }
1073
1074 init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1075
1076 rc = ghes_ioremap_init();
1077 if (rc)
1078 goto err;
1079
1080 rc = ghes_estatus_pool_init();
1081 if (rc)
1082 goto err_ioremap_exit;
1083
1084 rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
1085 GHES_ESTATUS_CACHE_ALLOCED_MAX);
1086 if (rc)
1087 goto err_pool_exit;
1088
1089 rc = platform_driver_register(&ghes_platform_driver);
1090 if (rc)
1091 goto err_pool_exit;
1092
1093 rc = apei_osc_setup();
1094 if (rc == 0 && osc_sb_apei_support_acked)
1095 pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1096 else if (rc == 0 && !osc_sb_apei_support_acked)
1097 pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1098 else if (rc && osc_sb_apei_support_acked)
1099 pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1100 else
1101 pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1102
1103 return 0;
1104err_pool_exit:
1105 ghes_estatus_pool_exit();
1106err_ioremap_exit:
1107 ghes_ioremap_exit();
1108err:
1109 return rc;
1110}
1111
1112static void __exit ghes_exit(void)
1113{
1114 platform_driver_unregister(&ghes_platform_driver);
1115 ghes_estatus_pool_exit();
1116 ghes_ioremap_exit();
1117}
1118
1119module_init(ghes_init);
1120module_exit(ghes_exit);
1121
1122MODULE_AUTHOR("Huang Ying");
1123MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
1124MODULE_LICENSE("GPL");
1125MODULE_ALIAS("platform:GHES");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * APEI Generic Hardware Error Source support
4 *
5 * Generic Hardware Error Source provides a way to report platform
6 * hardware errors (such as those from the chipset). It works in
7 * so-called "Firmware First" mode: hardware errors are reported to
8 * firmware first, then forwarded to Linux by the firmware. This way,
9 * firmware can check non-standard hardware error registers or
10 * non-standard hardware links to produce more detailed hardware error
11 * information for Linux.
12 *
13 * For more information about Generic Hardware Error Source, please
14 * refer to ACPI Specification version 4.0, section 17.3.2.6
15 *
16 * Copyright 2010,2011 Intel Corp.
17 * Author: Huang Ying <ying.huang@intel.com>
18 */
19
20#include <linux/arm_sdei.h>
21#include <linux/kernel.h>
22#include <linux/moduleparam.h>
23#include <linux/init.h>
24#include <linux/acpi.h>
25#include <linux/io.h>
26#include <linux/interrupt.h>
27#include <linux/timer.h>
28#include <linux/cper.h>
29#include <linux/platform_device.h>
30#include <linux/mutex.h>
31#include <linux/ratelimit.h>
32#include <linux/vmalloc.h>
33#include <linux/irq_work.h>
34#include <linux/llist.h>
35#include <linux/genalloc.h>
36#include <linux/pci.h>
37#include <linux/pfn.h>
38#include <linux/aer.h>
39#include <linux/nmi.h>
40#include <linux/sched/clock.h>
41#include <linux/uuid.h>
42#include <linux/ras.h>
43#include <linux/task_work.h>
44
45#include <acpi/actbl1.h>
46#include <acpi/ghes.h>
47#include <acpi/apei.h>
48#include <asm/fixmap.h>
49#include <asm/tlbflush.h>
50#include <ras/ras_event.h>
51
52#include "apei-internal.h"
53
54#define GHES_PFX "GHES: "
55
56#define GHES_ESTATUS_MAX_SIZE 65536
57#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
58
59#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
60
61/* This is just an estimation for memory pool allocation */
62#define GHES_ESTATUS_CACHE_AVG_SIZE 512
63
64#define GHES_ESTATUS_CACHES_SIZE 4
65
66#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
67/* Prevent too many caches from being allocated because of RCU */
68#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
69
70#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
71 (sizeof(struct ghes_estatus_cache) + (estatus_len))
72#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
73 ((struct acpi_hest_generic_status *) \
74 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
75
76#define GHES_ESTATUS_NODE_LEN(estatus_len) \
77 (sizeof(struct ghes_estatus_node) + (estatus_len))
78#define GHES_ESTATUS_FROM_NODE(estatus_node) \
79 ((struct acpi_hest_generic_status *) \
80 ((struct ghes_estatus_node *)(estatus_node) + 1))
81
82#define GHES_VENDOR_ENTRY_LEN(gdata_len) \
83 (sizeof(struct ghes_vendor_record_entry) + (gdata_len))
84#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry) \
85 ((struct acpi_hest_generic_data *) \
86 ((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
87
88/*
89 * NMI-like notifications vary by architecture; before the compiler can prune
90 * unused static functions it needs a value for these enums.
91 */
92#ifndef CONFIG_ARM_SDE_INTERFACE
93#define FIX_APEI_GHES_SDEI_NORMAL __end_of_fixed_addresses
94#define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses
95#endif
96
97static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);
98
99static inline bool is_hest_type_generic_v2(struct ghes *ghes)
100{
101 return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
102}
103
104/*
105 * This driver isn't really modular; however, for the time being,
106 * continuing to use module_param is the easiest way to remain
107 * compatible with existing boot arg use cases.
108 */
109bool ghes_disable;
110module_param_named(disable, ghes_disable, bool, 0);
111
112/*
113 * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
114 * check.
115 */
116static bool ghes_edac_force_enable;
117module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
118
119/*
120 * All error sources notified with HED (Hardware Error Device) share a
121 * single notifier callback, so they need to be linked and checked one
122 * by one. This holds true for NMI too.
123 *
124 * RCU is used for these lists, so ghes_list_mutex is only used for
125 * list changing, not for traversing.
126 */
127static LIST_HEAD(ghes_hed);
128static DEFINE_MUTEX(ghes_list_mutex);
129
130/*
131 * A list of GHES devices which are given to the corresponding EDAC driver
132 * ghes_edac for further use.
133 */
134static LIST_HEAD(ghes_devs);
135static DEFINE_MUTEX(ghes_devs_mutex);
136
137/*
138 * The memory area used to transfer hardware error information from
139 * BIOS to Linux can be determined only in the NMI, IRQ or timer
140 * handler, but the general ioremap can not be used in atomic context,
141 * so the fixmap is used instead.
142 *
143 * This spinlock is used to prevent the fixmap entry from being used
144 * simultaneously.
145 */
146static DEFINE_SPINLOCK(ghes_notify_lock_irq);
147
148struct ghes_vendor_record_entry {
149 struct work_struct work;
150 int error_severity;
151 char vendor_record[];
152};
153
154static struct gen_pool *ghes_estatus_pool;
155static unsigned long ghes_estatus_pool_size_request;
156
157static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
158static atomic_t ghes_estatus_cache_alloced;
159
160static int ghes_panic_timeout __read_mostly = 30;
161
162static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
163{
164 phys_addr_t paddr;
165 pgprot_t prot;
166
167 paddr = PFN_PHYS(pfn);
168 prot = arch_apei_get_mem_attribute(paddr);
169 __set_fixmap(fixmap_idx, paddr, prot);
170
171 return (void __iomem *) __fix_to_virt(fixmap_idx);
172}
173
174static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
175{
176 int _idx = virt_to_fix((unsigned long)vaddr);
177
178 WARN_ON_ONCE(fixmap_idx != _idx);
179 clear_fixmap(fixmap_idx);
180}
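
/*
 * Each context owns one fixmap slot, so users of a slot must be
 * serialized. A minimal sketch for IRQ/process context (illustrative
 * only; in this file the mapping is done by ghes_copy_tofrom_phys()
 * below and the locking by its callers):
 *
 *	unsigned long flags;
 *	void __iomem *vaddr;
 *
 *	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
 *	vaddr = ghes_map(PHYS_PFN(paddr), FIX_APEI_GHES_IRQ);
 *	memcpy_fromio(buffer, vaddr + offset, trunk);
 *	ghes_unmap(vaddr, FIX_APEI_GHES_IRQ);
 *	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 */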
181
182int ghes_estatus_pool_init(unsigned int num_ghes)
183{
184 unsigned long addr, len;
185 int rc;
186
187 ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
188 if (!ghes_estatus_pool)
189 return -ENOMEM;
190
191 len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
192 len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
193
194 ghes_estatus_pool_size_request = PAGE_ALIGN(len);
195 addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
196 if (!addr)
197 goto err_pool_alloc;
198
199 rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
200 if (rc)
201 goto err_pool_add;
202
203 return 0;
204
205err_pool_add:
206 vfree((void *)addr);
207
208err_pool_alloc:
209 gen_pool_destroy(ghes_estatus_pool);
210
211 return -ENOMEM;
212}
213
214static int map_gen_v2(struct ghes *ghes)
215{
216 return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
217}
218
219static void unmap_gen_v2(struct ghes *ghes)
220{
221 apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
222}
223
224static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
225{
226 int rc;
227 u64 val = 0;
228
229 rc = apei_read(&val, &gv2->read_ack_register);
230 if (rc)
231 return;
232
233 val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
234 val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;
235
236 apei_write(val, &gv2->read_ack_register);
237}
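
/*
 * Worked example for the acknowledgment above, with assumed register
 * values bit_offset = 0, read_ack_preserve = 0xfffffffe and
 * read_ack_write = 0x1: the AND clears the low bit, the OR forces it
 * to 1, and all other bits are preserved, telling the firmware that
 * the error status block has been consumed and may be reused.
 */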
238
239static struct ghes *ghes_new(struct acpi_hest_generic *generic)
240{
241 struct ghes *ghes;
242 unsigned int error_block_length;
243 int rc;
244
245 ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
246 if (!ghes)
247 return ERR_PTR(-ENOMEM);
248
249 ghes->generic = generic;
250 if (is_hest_type_generic_v2(ghes)) {
251 rc = map_gen_v2(ghes);
252 if (rc)
253 goto err_free;
254 }
255
256 rc = apei_map_generic_address(&generic->error_status_address);
257 if (rc)
258 goto err_unmap_read_ack_addr;
259 error_block_length = generic->error_block_length;
260 if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
261 pr_warn(FW_WARN GHES_PFX
262 "Error status block length is too long: %u for "
263 "generic hardware error source: %d.\n",
264 error_block_length, generic->header.source_id);
265 error_block_length = GHES_ESTATUS_MAX_SIZE;
266 }
267 ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
268 if (!ghes->estatus) {
269 rc = -ENOMEM;
270 goto err_unmap_status_addr;
271 }
272
273 return ghes;
274
275err_unmap_status_addr:
276 apei_unmap_generic_address(&generic->error_status_address);
277err_unmap_read_ack_addr:
278 if (is_hest_type_generic_v2(ghes))
279 unmap_gen_v2(ghes);
280err_free:
281 kfree(ghes);
282 return ERR_PTR(rc);
283}
284
285static void ghes_fini(struct ghes *ghes)
286{
287 kfree(ghes->estatus);
288 apei_unmap_generic_address(&ghes->generic->error_status_address);
289 if (is_hest_type_generic_v2(ghes))
290 unmap_gen_v2(ghes);
291}
292
293static inline int ghes_severity(int severity)
294{
295 switch (severity) {
296 case CPER_SEV_INFORMATIONAL:
297 return GHES_SEV_NO;
298 case CPER_SEV_CORRECTED:
299 return GHES_SEV_CORRECTED;
300 case CPER_SEV_RECOVERABLE:
301 return GHES_SEV_RECOVERABLE;
302 case CPER_SEV_FATAL:
303 return GHES_SEV_PANIC;
304 default:
305 /* Unknown, go panic */
306 return GHES_SEV_PANIC;
307 }
308}
309
310static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
311 int from_phys,
312 enum fixed_addresses fixmap_idx)
313{
314 void __iomem *vaddr;
315 u64 offset;
316 u32 trunk;
317
318 while (len > 0) {
319 offset = paddr - (paddr & PAGE_MASK);
320 vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
321 trunk = PAGE_SIZE - offset;
322 trunk = min(trunk, len);
323 if (from_phys)
324 memcpy_fromio(buffer, vaddr + offset, trunk);
325 else
326 memcpy_toio(vaddr + offset, buffer, trunk);
327 len -= trunk;
328 paddr += trunk;
329 buffer += trunk;
330 ghes_unmap(vaddr, fixmap_idx);
331 }
332}
333
334/* Check that the top-level record header has an appropriate size. */
335static int __ghes_check_estatus(struct ghes *ghes,
336 struct acpi_hest_generic_status *estatus)
337{
338 u32 len = cper_estatus_len(estatus);
339
340 if (len < sizeof(*estatus)) {
341 pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
342 return -EIO;
343 }
344
345 if (len > ghes->generic->error_block_length) {
346 pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
347 return -EIO;
348 }
349
350 if (cper_estatus_check_header(estatus)) {
351 pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
352 return -EIO;
353 }
354
355 return 0;
356}
357
358/* Read the CPER block, returning its address, and header in estatus. */
359static int __ghes_peek_estatus(struct ghes *ghes,
360 struct acpi_hest_generic_status *estatus,
361 u64 *buf_paddr, enum fixed_addresses fixmap_idx)
362{
363 struct acpi_hest_generic *g = ghes->generic;
364 int rc;
365
366 rc = apei_read(buf_paddr, &g->error_status_address);
367 if (rc) {
368 *buf_paddr = 0;
369 pr_warn_ratelimited(FW_WARN GHES_PFX
370"Failed to read error status block address for hardware error source: %d.\n",
371 g->header.source_id);
372 return -EIO;
373 }
374 if (!*buf_paddr)
375 return -ENOENT;
376
377 ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
378 fixmap_idx);
379 if (!estatus->block_status) {
380 *buf_paddr = 0;
381 return -ENOENT;
382 }
383
384 return 0;
385}
386
387static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
388 u64 buf_paddr, enum fixed_addresses fixmap_idx,
389 size_t buf_len)
390{
391 ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
392 if (cper_estatus_check(estatus)) {
393 pr_warn_ratelimited(FW_WARN GHES_PFX
394 "Failed to read error status block!\n");
395 return -EIO;
396 }
397
398 return 0;
399}
400
401static int ghes_read_estatus(struct ghes *ghes,
402 struct acpi_hest_generic_status *estatus,
403 u64 *buf_paddr, enum fixed_addresses fixmap_idx)
404{
405 int rc;
406
407 rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
408 if (rc)
409 return rc;
410
411 rc = __ghes_check_estatus(ghes, estatus);
412 if (rc)
413 return rc;
414
415 return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
416 cper_estatus_len(estatus));
417}
418
419static void ghes_clear_estatus(struct ghes *ghes,
420 struct acpi_hest_generic_status *estatus,
421 u64 buf_paddr, enum fixed_addresses fixmap_idx)
422{
423 estatus->block_status = 0;
424
425 if (!buf_paddr)
426 return;
427
428 ghes_copy_tofrom_phys(estatus, buf_paddr,
429 sizeof(estatus->block_status), 0,
430 fixmap_idx);
431
432 /*
433 * GHESv2 type HEST entries introduce support for error acknowledgment,
434 * so only acknowledge the error if this support is present.
435 */
436 if (is_hest_type_generic_v2(ghes))
437 ghes_ack_error(ghes->generic_v2);
438}
439
440/*
441 * Called as task_work before returning to user-space.
442 * Ensure any queued work has been done before we return to the context that
443 * triggered the notification.
444 */
445static void ghes_kick_task_work(struct callback_head *head)
446{
447 struct acpi_hest_generic_status *estatus;
448 struct ghes_estatus_node *estatus_node;
449 u32 node_len;
450
451 estatus_node = container_of(head, struct ghes_estatus_node, task_work);
452 if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
453 memory_failure_queue_kick(estatus_node->task_work_cpu);
454
455 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
456 node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
457 gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
458}
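
/*
 * The callback is attached to the estatus node when the NMI-like path
 * queues work that must complete before user-space resumes. A minimal
 * sketch, assuming the node has already been filled in (the real
 * registration happens in the deferred-processing path, not shown
 * here):
 *
 *	estatus_node->task_work.func = ghes_kick_task_work;
 *	estatus_node->task_work_cpu = smp_processor_id();
 *	if (task_work_add(current, &estatus_node->task_work, TWA_RESUME))
 *		estatus_node->task_work.func = NULL;
 */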
459
460static bool ghes_do_memory_failure(u64 physical_addr, int flags)
461{
462 unsigned long pfn;
463
464 if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
465 return false;
466
467 pfn = PHYS_PFN(physical_addr);
468 if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
469 pr_warn_ratelimited(FW_WARN GHES_PFX
470 "Invalid address in generic error data: %#llx\n",
471 physical_addr);
472 return false;
473 }
474
475 memory_failure_queue(pfn, flags);
476 return true;
477}
478
479static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
480 int sev)
481{
482 int flags = -1;
483 int sec_sev = ghes_severity(gdata->error_severity);
484 struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
485
486 if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
487 return false;
488
489 /* Only the following two cases can be handled properly for now */
490 if (sec_sev == GHES_SEV_CORRECTED &&
491 (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
492 flags = MF_SOFT_OFFLINE;
493 if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
494 flags = 0;
495
496 if (flags != -1)
497 return ghes_do_memory_failure(mem_err->physical_addr, flags);
498
499 return false;
500}
501
502static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
503{
504 struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
505 bool queued = false;
506 int sec_sev, i;
507 char *p;
508
509 log_arm_hw_error(err);
510
511 sec_sev = ghes_severity(gdata->error_severity);
512 if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
513 return false;
514
515 p = (char *)(err + 1);
516 for (i = 0; i < err->err_info_num; i++) {
517 struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
518 bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
519 bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
520 const char *error_type = "unknown error";
521
522 /*
523 * The field (err_info->error_info & BIT(26)) is always set to 1 by
524 * some old firmware on the HiSilicon Kunpeng920. We assume that the
525 * firmware won't mix corrected errors into an uncorrected section,
526 * so don't filter out 'corrected' errors here.
527 */
528 if (is_cache && has_pa) {
529 queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
530 p += err_info->length;
531 continue;
532 }
533
534 if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
535 error_type = cper_proc_error_type_strs[err_info->type];
536
537 pr_warn_ratelimited(FW_WARN GHES_PFX
538 "Unhandled processor error type: %s\n",
539 error_type);
540 p += err_info->length;
541 }
542
543 return queued;
544}
545
546/*
547 * PCIe AER errors need to be sent to the AER driver for reporting and
548 * recovery. The GHES severities map to the following AER severities and
549 * require the following handling:
550 *
551 * GHES_SEV_CORRECTED -> AER_CORRECTABLE
552 * These need to be reported by the AER driver but no recovery is
553 * necessary.
554 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
555 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
556 * These both need to be reported and recovered from by the AER driver.
557 * GHES_SEV_PANIC does not make it to this handling since the kernel must
558 * panic.
559 */
560static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
561{
562#ifdef CONFIG_ACPI_APEI_PCIEAER
563 struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
564
565 if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
566 pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
567 unsigned int devfn;
568 int aer_severity;
569
570 devfn = PCI_DEVFN(pcie_err->device_id.device,
571 pcie_err->device_id.function);
572 aer_severity = cper_severity_to_aer(gdata->error_severity);
573
574 /*
575 * If firmware reset the component to contain
576 * the error, we must reinitialize it before
577 * use, so treat it as a fatal AER error.
578 */
579 if (gdata->flags & CPER_SEC_RESET)
580 aer_severity = AER_FATAL;
581
582 aer_recover_queue(pcie_err->device_id.segment,
583 pcie_err->device_id.bus,
584 devfn, aer_severity,
585 (struct aer_capability_regs *)
586 pcie_err->aer_info);
587 }
588#endif
589}
590
591static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
592
593int ghes_register_vendor_record_notifier(struct notifier_block *nb)
594{
595 return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
596}
597EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
598
599void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
600{
601 blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
602}
603EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
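
/*
 * A minimal sketch of a consumer of this interface (hypothetical names,
 * illustrative only; the callback runs from process context and
 * receives the event severity and a copy of the vendor section's
 * acpi_hest_generic_data):
 *
 *	static int my_vendor_record_cb(struct notifier_block *nb,
 *				       unsigned long severity, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		...inspect gdata->section_type and acpi_hest_get_payload(gdata)...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vendor_record_nb = {
 *		.notifier_call = my_vendor_record_cb,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&my_vendor_record_nb);
 */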
604
605static void ghes_vendor_record_work_func(struct work_struct *work)
606{
607 struct ghes_vendor_record_entry *entry;
608 struct acpi_hest_generic_data *gdata;
609 u32 len;
610
611 entry = container_of(work, struct ghes_vendor_record_entry, work);
612 gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
613
614 blocking_notifier_call_chain(&vendor_record_notify_list,
615 entry->error_severity, gdata);
616
617 len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
618 gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
619}
620
621static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
622 int sev)
623{
624 struct acpi_hest_generic_data *copied_gdata;
625 struct ghes_vendor_record_entry *entry;
626 u32 len;
627
628 len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
629 entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
630 if (!entry)
631 return;
632
633 copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
634 memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
635 entry->error_severity = sev;
636
637 INIT_WORK(&entry->work, ghes_vendor_record_work_func);
638 schedule_work(&entry->work);
639}
640
641static bool ghes_do_proc(struct ghes *ghes,
642 const struct acpi_hest_generic_status *estatus)
643{
644 int sev, sec_sev;
645 struct acpi_hest_generic_data *gdata;
646 guid_t *sec_type;
647 const guid_t *fru_id = &guid_null;
648 char *fru_text = "";
649 bool queued = false;
650
651 sev = ghes_severity(estatus->error_severity);
652 apei_estatus_for_each_section(estatus, gdata) {
653 sec_type = (guid_t *)gdata->section_type;
654 sec_sev = ghes_severity(gdata->error_severity);
655 if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
656 fru_id = (guid_t *)gdata->fru_id;
657
658 if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
659 fru_text = gdata->fru_text;
660
661 if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
662 struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
663
664 atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
665
666 arch_apei_report_mem_error(sev, mem_err);
667 queued = ghes_handle_memory_failure(gdata, sev);
668 }
669 else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
670 ghes_handle_aer(gdata);
671 }
672 else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
673 queued = ghes_handle_arm_hw_error(gdata, sev);
674 } else {
675 void *err = acpi_hest_get_payload(gdata);
676
677 ghes_defer_non_standard_event(gdata, sev);
678 log_non_standard_event(sec_type, fru_id, fru_text,
679 sec_sev, err,
680 gdata->error_data_length);
681 }
682 }
683
684 return queued;
685}
686
687static void __ghes_print_estatus(const char *pfx,
688 const struct acpi_hest_generic *generic,
689 const struct acpi_hest_generic_status *estatus)
690{
691 static atomic_t seqno;
692 unsigned int curr_seqno;
693 char pfx_seq[64];
694
695 if (pfx == NULL) {
696 if (ghes_severity(estatus->error_severity) <=
697 GHES_SEV_CORRECTED)
698 pfx = KERN_WARNING;
699 else
700 pfx = KERN_ERR;
701 }
702 curr_seqno = atomic_inc_return(&seqno);
703 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
704 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
705 pfx_seq, generic->header.source_id);
706 cper_estatus_print(pfx_seq, estatus);
707}
708
709static int ghes_print_estatus(const char *pfx,
710 const struct acpi_hest_generic *generic,
711 const struct acpi_hest_generic_status *estatus)
712{
713 /* Not more than 2 messages every 5 seconds */
714 static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
715 static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
716 struct ratelimit_state *ratelimit;
717
718 if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
719 ratelimit = &ratelimit_corrected;
720 else
721 ratelimit = &ratelimit_uncorrected;
722 if (__ratelimit(ratelimit)) {
723 __ghes_print_estatus(pfx, generic, estatus);
724 return 1;
725 }
726 return 0;
727}
728
729/*
730 * GHES error status reporting throttle, to report more kinds of
731 * errors instead of just the most frequently occurring ones.
732 */
733static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
734{
735 u32 len;
736 int i, cached = 0;
737 unsigned long long now;
738 struct ghes_estatus_cache *cache;
739 struct acpi_hest_generic_status *cache_estatus;
740
741 len = cper_estatus_len(estatus);
742 rcu_read_lock();
743 for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
744 cache = rcu_dereference(ghes_estatus_caches[i]);
745 if (cache == NULL)
746 continue;
747 if (len != cache->estatus_len)
748 continue;
749 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
750 if (memcmp(estatus, cache_estatus, len))
751 continue;
752 atomic_inc(&cache->count);
753 now = sched_clock();
754 if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
755 cached = 1;
756 break;
757 }
758 rcu_read_unlock();
759 return cached;
760}
761
762static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
763 struct acpi_hest_generic *generic,
764 struct acpi_hest_generic_status *estatus)
765{
766 int alloced;
767 u32 len, cache_len;
768 struct ghes_estatus_cache *cache;
769 struct acpi_hest_generic_status *cache_estatus;
770
771 alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
772 if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
773 atomic_dec(&ghes_estatus_cache_alloced);
774 return NULL;
775 }
776 len = cper_estatus_len(estatus);
777 cache_len = GHES_ESTATUS_CACHE_LEN(len);
778 cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
779 if (!cache) {
780 atomic_dec(&ghes_estatus_cache_alloced);
781 return NULL;
782 }
783 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
784 memcpy(cache_estatus, estatus, len);
785 cache->estatus_len = len;
786 atomic_set(&cache->count, 0);
787 cache->generic = generic;
788 cache->time_in = sched_clock();
789 return cache;
790}
791
792static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
793{
794 struct ghes_estatus_cache *cache;
795 u32 len;
796
797 cache = container_of(head, struct ghes_estatus_cache, rcu);
798 len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
799 len = GHES_ESTATUS_CACHE_LEN(len);
800 gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
801 atomic_dec(&ghes_estatus_cache_alloced);
802}
803
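/*
 * Insert a newly allocated entry into ghes_estatus_caches[].  Slot
 * selection, in order of preference: an empty slot, an expired entry,
 * or the entry with the largest average interval between hits, i.e.
 * roughly
 *
 *	period = (now - cache->time_in) / (atomic_read(&cache->count) + 1)
 *
 * The displaced entry, if any, is freed after an RCU grace period.
 */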
804static void
805ghes_estatus_cache_add(struct acpi_hest_generic *generic,
806 struct acpi_hest_generic_status *estatus)
807{
808 unsigned long long now, duration, period, max_period = 0;
809 struct ghes_estatus_cache *cache, *new_cache;
810 struct ghes_estatus_cache __rcu *victim;
811 int i, slot = -1, count;
812
813 new_cache = ghes_estatus_cache_alloc(generic, estatus);
814 if (!new_cache)
815 return;
816
817 rcu_read_lock();
818 now = sched_clock();
819 for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
820 cache = rcu_dereference(ghes_estatus_caches[i]);
821 if (cache == NULL) {
822 slot = i;
823 break;
824 }
825 duration = now - cache->time_in;
826 if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
827 slot = i;
828 break;
829 }
830 count = atomic_read(&cache->count);
831 period = duration;
832 do_div(period, (count + 1));
833 if (period > max_period) {
834 max_period = period;
835 slot = i;
836 }
837 }
838 rcu_read_unlock();
839
840 if (slot != -1) {
841 /*
842 * Use release semantics to ensure that ghes_estatus_cached()
843 * running on another CPU will see the updated cache fields if
844 * it can see the new value of the pointer.
845 */
846 victim = xchg_release(&ghes_estatus_caches[slot],
847 RCU_INITIALIZER(new_cache));
848
849 /*
850 * At this point, victim may point to a cached item different
851 * from the one based on which we selected the slot. Instead of
852 * going to the loop again to pick another slot, let's just
853 * drop the other item anyway: this may cause a false cache
854 * miss later on, but that won't cause any problems.
855 */
856 if (victim)
857 call_rcu(&unrcu_pointer(victim)->rcu,
858 ghes_estatus_cache_rcu_free);
859 }
860}
861
862static void __ghes_panic(struct ghes *ghes,
863 struct acpi_hest_generic_status *estatus,
864 u64 buf_paddr, enum fixed_addresses fixmap_idx)
865{
866 __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
867
868 ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
869
870 /* reboot to log the error! */
871 if (!panic_timeout)
872 panic_timeout = ghes_panic_timeout;
873 panic("Fatal hardware error!");
874}
875
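/*
 * Process one error source from non-NMI context (polling timer, external
 * IRQ, or an SCI/GSIV/GPIO notification): read the error status block,
 * panic if the severity is fatal, otherwise print it (subject to the
 * cache based throttling above), hand it to ghes_do_proc(), and finally
 * clear the block.
 */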
876static int ghes_proc(struct ghes *ghes)
877{
878 struct acpi_hest_generic_status *estatus = ghes->estatus;
879 u64 buf_paddr;
880 int rc;
881
882 rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
883 if (rc)
884 goto out;
885
886 if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
887 __ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
888
889 if (!ghes_estatus_cached(estatus)) {
890 if (ghes_print_estatus(NULL, ghes->generic, estatus))
891 ghes_estatus_cache_add(ghes->generic, estatus);
892 }
893 ghes_do_proc(ghes, estatus);
894
895out:
896 ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
897
898 return rc;
899}
900
901static void ghes_add_timer(struct ghes *ghes)
902{
903 struct acpi_hest_generic *g = ghes->generic;
904 unsigned long expire;
905
906 if (!g->notify.poll_interval) {
907 pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
908 g->header.source_id);
909 return;
910 }
911 expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
912 ghes->timer.expires = round_jiffies_relative(expire);
913 add_timer(&ghes->timer);
914}
915
916static void ghes_poll_func(struct timer_list *t)
917{
918 struct ghes *ghes = from_timer(ghes, t, timer);
919 unsigned long flags;
920
921 spin_lock_irqsave(&ghes_notify_lock_irq, flags);
922 ghes_proc(ghes);
923 spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
924 if (!(ghes->flags & GHES_EXITING))
925 ghes_add_timer(ghes);
926}
927
928static irqreturn_t ghes_irq_func(int irq, void *data)
929{
930 struct ghes *ghes = data;
931 unsigned long flags;
932 int rc;
933
934 spin_lock_irqsave(&ghes_notify_lock_irq, flags);
935 rc = ghes_proc(ghes);
936 spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
937 if (rc)
938 return IRQ_NONE;
939
940 return IRQ_HANDLED;
941}
942
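/*
 * SCI, GSIV and GPIO notifications are multiplexed through the ACPI
 * Hardware Error Device (HED).  The notifier does not know which source
 * fired, so poll every source registered on the ghes_hed list.
 */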
943static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
944 void *data)
945{
946 struct ghes *ghes;
947 unsigned long flags;
948 int ret = NOTIFY_DONE;
949
950 spin_lock_irqsave(&ghes_notify_lock_irq, flags);
951 rcu_read_lock();
952 list_for_each_entry_rcu(ghes, &ghes_hed, list) {
953 if (!ghes_proc(ghes))
954 ret = NOTIFY_OK;
955 }
956 rcu_read_unlock();
957 spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
958
959 return ret;
960}
961
962static struct notifier_block ghes_notifier_hed = {
963 .notifier_call = ghes_notify_hed,
964};
965
966/*
967 * Handlers for CPER records may not be NMI safe. For example,
968 * memory_failure_queue() takes spinlocks and calls schedule_work_on().
969 * In any NMI-like handler, memory from ghes_estatus_pool is used to save
970 * the estatus, which is then added to ghes_estatus_llist. irq_work_queue()
971 * causes ghes_proc_in_irq() to run in IRQ context, where each estatus on
972 * ghes_estatus_llist is processed.
973 *
974 * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
975 * to suppress frequent messages.
976 */
977static struct llist_head ghes_estatus_llist;
978static struct irq_work ghes_proc_irq_work;
979
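/*
 * IRQ work handler: bottom half for the NMI-like notification paths.
 * Drain ghes_estatus_llist, process and (rate-limited) print each queued
 * error status block, and add task work via ghes_kick_task_work() when
 * ghes_do_proc() indicates there is work that must finish in task
 * context (e.g. after queueing memory failure handling).  The pool node
 * is freed here unless the task work still needs it.
 */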
980static void ghes_proc_in_irq(struct irq_work *irq_work)
981{
982 struct llist_node *llnode, *next;
983 struct ghes_estatus_node *estatus_node;
984 struct acpi_hest_generic *generic;
985 struct acpi_hest_generic_status *estatus;
986 bool task_work_pending;
987 u32 len, node_len;
988 int ret;
989
990 llnode = llist_del_all(&ghes_estatus_llist);
991	/*
992	 * The llist is in LIFO order; reverse it so the error status
993	 * blocks are processed in the order in which they were queued.
994	 */
995 llnode = llist_reverse_order(llnode);
996 while (llnode) {
997 next = llnode->next;
998 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
999 llnode);
1000 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1001 len = cper_estatus_len(estatus);
1002 node_len = GHES_ESTATUS_NODE_LEN(len);
1003 task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
1004 if (!ghes_estatus_cached(estatus)) {
1005 generic = estatus_node->generic;
1006 if (ghes_print_estatus(NULL, generic, estatus))
1007 ghes_estatus_cache_add(generic, estatus);
1008 }
1009
1010 if (task_work_pending && current->mm) {
1011 estatus_node->task_work.func = ghes_kick_task_work;
1012 estatus_node->task_work_cpu = smp_processor_id();
1013 ret = task_work_add(current, &estatus_node->task_work,
1014 TWA_RESUME);
1015 if (ret)
1016 estatus_node->task_work.func = NULL;
1017 }
1018
1019 if (!estatus_node->task_work.func)
1020 gen_pool_free(ghes_estatus_pool,
1021 (unsigned long)estatus_node, node_len);
1022
1023 llnode = next;
1024 }
1025}
1026
1027static void ghes_print_queued_estatus(void)
1028{
1029 struct llist_node *llnode;
1030 struct ghes_estatus_node *estatus_node;
1031 struct acpi_hest_generic *generic;
1032 struct acpi_hest_generic_status *estatus;
1033
1034 llnode = llist_del_all(&ghes_estatus_llist);
1035	/*
1036	 * The llist is in LIFO order; reverse it so the error status
1037	 * blocks are printed in the order in which they were queued.
1038	 */
1039 llnode = llist_reverse_order(llnode);
1040 while (llnode) {
1041 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1042 llnode);
1043 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1044 generic = estatus_node->generic;
1045 ghes_print_estatus(NULL, generic, estatus);
1046 llnode = llnode->next;
1047 }
1048}
1049
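/*
 * NMI-safe handling of a single error source: peek at the block header,
 * copy the complete error status block into a node allocated from
 * ghes_estatus_pool, panic immediately if the severity is fatal, and
 * otherwise queue the node on ghes_estatus_llist for ghes_proc_in_irq().
 * Requires an NMI-safe cmpxchg for the gen_pool allocator.
 */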
1050static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1051 enum fixed_addresses fixmap_idx)
1052{
1053 struct acpi_hest_generic_status *estatus, tmp_header;
1054 struct ghes_estatus_node *estatus_node;
1055 u32 len, node_len;
1056 u64 buf_paddr;
1057 int sev, rc;
1058
1059 if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1060 return -EOPNOTSUPP;
1061
1062 rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1063 if (rc) {
1064 ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1065 return rc;
1066 }
1067
1068 rc = __ghes_check_estatus(ghes, &tmp_header);
1069 if (rc) {
1070 ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1071 return rc;
1072 }
1073
1074 len = cper_estatus_len(&tmp_header);
1075 node_len = GHES_ESTATUS_NODE_LEN(len);
1076 estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1077 if (!estatus_node)
1078 return -ENOMEM;
1079
1080 estatus_node->ghes = ghes;
1081 estatus_node->generic = ghes->generic;
1082 estatus_node->task_work.func = NULL;
1083 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1084
1085 if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1086 ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1087 rc = -ENOENT;
1088 goto no_work;
1089 }
1090
1091 sev = ghes_severity(estatus->error_severity);
1092 if (sev >= GHES_SEV_PANIC) {
1093 ghes_print_queued_estatus();
1094 __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1095 }
1096
1097 ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1098
1099 /* This error has been reported before, don't process it again. */
1100 if (ghes_estatus_cached(estatus))
1101 goto no_work;
1102
1103 llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1104
1105 return rc;
1106
1107no_work:
1108 gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1109 node_len);
1110
1111 return rc;
1112}
1113
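/*
 * Walk an RCU-protected list of error sources from NMI-like context,
 * queue whatever they have pending, and kick the IRQ work so the queued
 * blocks are processed once the CPU is back in IRQ context.  Returns 0
 * if at least one source had a valid error status block.
 */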
1114static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1115 enum fixed_addresses fixmap_idx)
1116{
1117 int ret = -ENOENT;
1118 struct ghes *ghes;
1119
1120 rcu_read_lock();
1121 list_for_each_entry_rcu(ghes, rcu_list, list) {
1122 if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1123 ret = 0;
1124 }
1125 rcu_read_unlock();
1126
1127 if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1128 irq_work_queue(&ghes_proc_irq_work);
1129
1130 return ret;
1131}
1132
1133#ifdef CONFIG_ACPI_APEI_SEA
1134static LIST_HEAD(ghes_sea);
1135
1136/*
1137 * Return 0 only if one of the SEA error sources successfully reported an error
1138 * record sent from the firmware.
1139 */
1140int ghes_notify_sea(void)
1141{
1142 static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1143 int rv;
1144
1145 raw_spin_lock(&ghes_notify_lock_sea);
1146 rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1147 raw_spin_unlock(&ghes_notify_lock_sea);
1148
1149 return rv;
1150}
1151
1152static void ghes_sea_add(struct ghes *ghes)
1153{
1154 mutex_lock(&ghes_list_mutex);
1155 list_add_rcu(&ghes->list, &ghes_sea);
1156 mutex_unlock(&ghes_list_mutex);
1157}
1158
1159static void ghes_sea_remove(struct ghes *ghes)
1160{
1161 mutex_lock(&ghes_list_mutex);
1162 list_del_rcu(&ghes->list);
1163 mutex_unlock(&ghes_list_mutex);
1164 synchronize_rcu();
1165}
1166#else /* CONFIG_ACPI_APEI_SEA */
1167static inline void ghes_sea_add(struct ghes *ghes) { }
1168static inline void ghes_sea_remove(struct ghes *ghes) { }
1169#endif /* CONFIG_ACPI_APEI_SEA */
1170
1171#ifdef CONFIG_HAVE_ACPI_APEI_NMI
1172/*
1173 * An NMI may be triggered on any CPU, so ghes_in_nmi is used to
1174 * allow only one concurrent reader.
1175 */
1176static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1177
1178static LIST_HEAD(ghes_nmi);
1179
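/*
 * NMI notification: only one CPU at a time spools the NMI error sources;
 * any other CPU that takes the NMI concurrently returns NMI_DONE so the
 * NMI can be attributed to another handler.
 */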
1180static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1181{
1182 static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1183 int ret = NMI_DONE;
1184
1185 if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1186 return ret;
1187
1188 raw_spin_lock(&ghes_notify_lock_nmi);
1189 if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1190 ret = NMI_HANDLED;
1191 raw_spin_unlock(&ghes_notify_lock_nmi);
1192
1193 atomic_dec(&ghes_in_nmi);
1194 return ret;
1195}
1196
1197static void ghes_nmi_add(struct ghes *ghes)
1198{
1199 mutex_lock(&ghes_list_mutex);
1200 if (list_empty(&ghes_nmi))
1201 register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1202 list_add_rcu(&ghes->list, &ghes_nmi);
1203 mutex_unlock(&ghes_list_mutex);
1204}
1205
1206static void ghes_nmi_remove(struct ghes *ghes)
1207{
1208 mutex_lock(&ghes_list_mutex);
1209 list_del_rcu(&ghes->list);
1210 if (list_empty(&ghes_nmi))
1211 unregister_nmi_handler(NMI_LOCAL, "ghes");
1212 mutex_unlock(&ghes_list_mutex);
1213	/*
1214	 * To synchronize with the NMI handler, the ghes can only be
1215	 * freed after the NMI handler has finished.
1216	 */
1217 synchronize_rcu();
1218}
1219#else /* CONFIG_HAVE_ACPI_APEI_NMI */
1220static inline void ghes_nmi_add(struct ghes *ghes) { }
1221static inline void ghes_nmi_remove(struct ghes *ghes) { }
1222#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1223
1224static void ghes_nmi_init_cxt(void)
1225{
1226 init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1227}
1228
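/*
 * SDEI (ARM Software Delegated Exception Interface) events are NMI-like,
 * so they reuse the NMI queueing path above.  Normal and critical
 * priority events use separate fixmap slots and locks, since a critical
 * event may preempt the handling of a normal one.
 */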
1229static int __ghes_sdei_callback(struct ghes *ghes,
1230 enum fixed_addresses fixmap_idx)
1231{
1232 if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1233 irq_work_queue(&ghes_proc_irq_work);
1234
1235 return 0;
1236 }
1237
1238 return -ENOENT;
1239}
1240
1241static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1242 void *arg)
1243{
1244 static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1245 struct ghes *ghes = arg;
1246 int err;
1247
1248 raw_spin_lock(&ghes_notify_lock_sdei_normal);
1249 err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1250 raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1251
1252 return err;
1253}
1254
1255static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1256 void *arg)
1257{
1258 static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1259 struct ghes *ghes = arg;
1260 int err;
1261
1262 raw_spin_lock(&ghes_notify_lock_sdei_critical);
1263 err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1264 raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1265
1266 return err;
1267}
1268
1269static int apei_sdei_register_ghes(struct ghes *ghes)
1270{
1271 if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1272 return -EOPNOTSUPP;
1273
1274 return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1275 ghes_sdei_critical_callback);
1276}
1277
1278static int apei_sdei_unregister_ghes(struct ghes *ghes)
1279{
1280 if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1281 return -EOPNOTSUPP;
1282
1283 return sdei_unregister_ghes(ghes);
1284}
1285
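/*
 * Platform driver probe, called once per generic error source described
 * by HEST: validate that the notification type is supported by this
 * kernel, allocate the struct ghes, wire up the source-specific
 * notification mechanism, and process any error that is already pending.
 */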
1286static int ghes_probe(struct platform_device *ghes_dev)
1287{
1288 struct acpi_hest_generic *generic;
1289 struct ghes *ghes = NULL;
1290 unsigned long flags;
1291
1292 int rc = -EINVAL;
1293
1294 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1295 if (!generic->enabled)
1296 return -ENODEV;
1297
1298 switch (generic->notify.type) {
1299 case ACPI_HEST_NOTIFY_POLLED:
1300 case ACPI_HEST_NOTIFY_EXTERNAL:
1301 case ACPI_HEST_NOTIFY_SCI:
1302 case ACPI_HEST_NOTIFY_GSIV:
1303 case ACPI_HEST_NOTIFY_GPIO:
1304 break;
1305
1306 case ACPI_HEST_NOTIFY_SEA:
1307 if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1308 pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1309 generic->header.source_id);
1310 rc = -ENOTSUPP;
1311 goto err;
1312 }
1313 break;
1314 case ACPI_HEST_NOTIFY_NMI:
1315 if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1316 pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1317 generic->header.source_id);
1318 goto err;
1319 }
1320 break;
1321 case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1322 if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1323 pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1324 generic->header.source_id);
1325 goto err;
1326 }
1327 break;
1328 case ACPI_HEST_NOTIFY_LOCAL:
1329 pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1330 generic->header.source_id);
1331 goto err;
1332 default:
1333 pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1334 generic->notify.type, generic->header.source_id);
1335 goto err;
1336 }
1337
1338 rc = -EIO;
1339 if (generic->error_block_length <
1340 sizeof(struct acpi_hest_generic_status)) {
1341 pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1342 generic->error_block_length, generic->header.source_id);
1343 goto err;
1344 }
1345 ghes = ghes_new(generic);
1346 if (IS_ERR(ghes)) {
1347 rc = PTR_ERR(ghes);
1348 ghes = NULL;
1349 goto err;
1350 }
1351
1352 switch (generic->notify.type) {
1353 case ACPI_HEST_NOTIFY_POLLED:
1354 timer_setup(&ghes->timer, ghes_poll_func, 0);
1355 ghes_add_timer(ghes);
1356 break;
1357 case ACPI_HEST_NOTIFY_EXTERNAL:
1358 /* External interrupt vector is GSI */
1359 rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1360 if (rc) {
1361 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1362 generic->header.source_id);
1363 goto err;
1364 }
1365 rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1366 "GHES IRQ", ghes);
1367 if (rc) {
1368 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1369 generic->header.source_id);
1370 goto err;
1371 }
1372 break;
1373
1374 case ACPI_HEST_NOTIFY_SCI:
1375 case ACPI_HEST_NOTIFY_GSIV:
1376 case ACPI_HEST_NOTIFY_GPIO:
1377 mutex_lock(&ghes_list_mutex);
1378 if (list_empty(&ghes_hed))
1379 register_acpi_hed_notifier(&ghes_notifier_hed);
1380 list_add_rcu(&ghes->list, &ghes_hed);
1381 mutex_unlock(&ghes_list_mutex);
1382 break;
1383
1384 case ACPI_HEST_NOTIFY_SEA:
1385 ghes_sea_add(ghes);
1386 break;
1387 case ACPI_HEST_NOTIFY_NMI:
1388 ghes_nmi_add(ghes);
1389 break;
1390 case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1391 rc = apei_sdei_register_ghes(ghes);
1392 if (rc)
1393 goto err;
1394 break;
1395 default:
1396 BUG();
1397 }
1398
1399 platform_set_drvdata(ghes_dev, ghes);
1400
1401 ghes->dev = &ghes_dev->dev;
1402
1403 mutex_lock(&ghes_devs_mutex);
1404 list_add_tail(&ghes->elist, &ghes_devs);
1405 mutex_unlock(&ghes_devs_mutex);
1406
1407 /* Handle any pending errors right away */
1408 spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1409 ghes_proc(ghes);
1410 spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1411
1412 return 0;
1413
1414err:
1415 if (ghes) {
1416 ghes_fini(ghes);
1417 kfree(ghes);
1418 }
1419 return rc;
1420}
1421
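/*
 * Undo ghes_probe(): mark the source as exiting, detach it from its
 * notification mechanism (synchronizing with RCU readers where the
 * source sat on an RCU list), then release its resources and free the
 * struct ghes.
 */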
1422static int ghes_remove(struct platform_device *ghes_dev)
1423{
1424 int rc;
1425 struct ghes *ghes;
1426 struct acpi_hest_generic *generic;
1427
1428 ghes = platform_get_drvdata(ghes_dev);
1429 generic = ghes->generic;
1430
1431 ghes->flags |= GHES_EXITING;
1432 switch (generic->notify.type) {
1433 case ACPI_HEST_NOTIFY_POLLED:
1434 timer_shutdown_sync(&ghes->timer);
1435 break;
1436 case ACPI_HEST_NOTIFY_EXTERNAL:
1437 free_irq(ghes->irq, ghes);
1438 break;
1439
1440 case ACPI_HEST_NOTIFY_SCI:
1441 case ACPI_HEST_NOTIFY_GSIV:
1442 case ACPI_HEST_NOTIFY_GPIO:
1443 mutex_lock(&ghes_list_mutex);
1444 list_del_rcu(&ghes->list);
1445 if (list_empty(&ghes_hed))
1446 unregister_acpi_hed_notifier(&ghes_notifier_hed);
1447 mutex_unlock(&ghes_list_mutex);
1448 synchronize_rcu();
1449 break;
1450
1451 case ACPI_HEST_NOTIFY_SEA:
1452 ghes_sea_remove(ghes);
1453 break;
1454 case ACPI_HEST_NOTIFY_NMI:
1455 ghes_nmi_remove(ghes);
1456 break;
1457 case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1458 rc = apei_sdei_unregister_ghes(ghes);
1459 if (rc)
1460 return rc;
1461 break;
1462 default:
1463 BUG();
1464 break;
1465 }
1466
1467 ghes_fini(ghes);
1468
1469 mutex_lock(&ghes_devs_mutex);
1470 list_del(&ghes->elist);
1471 mutex_unlock(&ghes_devs_mutex);
1472
1473 kfree(ghes);
1474
1475 return 0;
1476}
1477
1478static struct platform_driver ghes_platform_driver = {
1479 .driver = {
1480 .name = "GHES",
1481 },
1482 .probe = ghes_probe,
1483 .remove = ghes_remove,
1484};
1485
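/*
 * Called during ACPI initialization.  Bail out if ACPI, HEST or GHES is
 * disabled; otherwise register the platform driver and report whether
 * APEI firmware first mode could be negotiated via the _OSC method.
 */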
1486void __init acpi_ghes_init(void)
1487{
1488 int rc;
1489
1490 sdei_init();
1491
1492 if (acpi_disabled)
1493 return;
1494
1495 switch (hest_disable) {
1496 case HEST_NOT_FOUND:
1497 return;
1498 case HEST_DISABLED:
1499 pr_info(GHES_PFX "HEST is not enabled!\n");
1500 return;
1501 default:
1502 break;
1503 }
1504
1505 if (ghes_disable) {
1506 pr_info(GHES_PFX "GHES is not enabled!\n");
1507 return;
1508 }
1509
1510 ghes_nmi_init_cxt();
1511
1512 rc = platform_driver_register(&ghes_platform_driver);
1513 if (rc)
1514 return;
1515
1516 rc = apei_osc_setup();
1517 if (rc == 0 && osc_sb_apei_support_acked)
1518 pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1519 else if (rc == 0 && !osc_sb_apei_support_acked)
1520 pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1521 else if (rc && osc_sb_apei_support_acked)
1522 pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1523 else
1524 pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1525}
1526
1527/*
1528 * Known x86 systems that prefer GHES error reporting:
1529 */
1530static struct acpi_platform_list plat_list[] = {
1531 {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
1532 { } /* End */
1533};
1534
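/*
 * On x86, GHES based EDAC reporting is only offered on the allow-listed
 * platforms above unless ghes_edac_force_enable is set; on other
 * architectures the device list is always returned.
 */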
1535struct list_head *ghes_get_devices(void)
1536{
1537 int idx = -1;
1538
1539 if (IS_ENABLED(CONFIG_X86)) {
1540 idx = acpi_match_platform_list(plat_list);
1541 if (idx < 0) {
1542 if (!ghes_edac_force_enable)
1543 return NULL;
1544
1545 pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
1546 }
1547 }
1548
1549 return &ghes_devs;
1550}
1551EXPORT_SYMBOL_GPL(ghes_get_devices);
1552
1553void ghes_register_report_chain(struct notifier_block *nb)
1554{
1555 atomic_notifier_chain_register(&ghes_report_chain, nb);
1556}
1557EXPORT_SYMBOL_GPL(ghes_register_report_chain);
1558
1559void ghes_unregister_report_chain(struct notifier_block *nb)
1560{
1561 atomic_notifier_chain_unregister(&ghes_report_chain, nb);
1562}
1563EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);