1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
4 * infrastructure
5 *
6 * APEI allows to report errors (for example from the chipset) to
7 * the operating system. This improves NMI handling especially. In
8 * addition it supports error serialization and error injection.
9 *
10 * For more information about APEI, please refer to ACPI Specification
11 * version 4.0, chapter 17.
12 *
13 * This file has Common functions used by more than one APEI table,
14 * including framework of interpreter for ERST and EINJ; resource
15 * management for APEI registers.
16 *
17 * Copyright (C) 2009, Intel Corp.
18 * Author: Huang Ying <ying.huang@intel.com>
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/acpi.h>
25#include <linux/slab.h>
26#include <linux/io.h>
27#include <linux/kref.h>
28#include <linux/interrupt.h>
29#include <linux/debugfs.h>
30#include <acpi/apei.h>
31#include <linux/unaligned.h>
32
33#include "apei-internal.h"
34
35#define APEI_PFX "APEI: "
36
37/*
38 * APEI ERST (Error Record Serialization Table) and EINJ (Error
39 * INJection) interpreter framework.
40 */
41
42#define APEI_EXEC_PRESERVE_REGISTER 0x1
43
/*
 * apei_exec_ctx_init - initialize an APEI interpreter context
 * @ctx: context to initialize
 * @ins_table: table of instruction implementations
 * @instructions: number of entries in @ins_table
 * @action_table: firmware-provided action table (ERST/EINJ serialization entries)
 * @entries: number of entries in @action_table
 *
 * Only the table pointers and sizes are set here; @ctx->value is set by
 * the read helpers and @ctx->ip is reset by __apei_exec_run().  Other
 * fields are left untouched.
 */
void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
56
/*
 * Read the generic address register of @entry and extract the field
 * selected by the region's bit_offset and @entry->mask into @val.
 * Returns 0 on success or a negative error code from apei_read().
 */
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	/* Shift the raw value down and mask off unrelated bits. */
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}
69
70int apei_exec_read_register(struct apei_exec_context *ctx,
71 struct acpi_whea_header *entry)
72{
73 int rc;
74 u64 val = 0;
75
76 rc = __apei_exec_read_register(entry, &val);
77 if (rc)
78 return rc;
79 ctx->value = val;
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(apei_exec_read_register);
84
85int apei_exec_read_register_value(struct apei_exec_context *ctx,
86 struct acpi_whea_header *entry)
87{
88 int rc;
89
90 rc = apei_exec_read_register(ctx, entry);
91 if (rc)
92 return rc;
93 ctx->value = (ctx->value == entry->value);
94
95 return 0;
96}
97EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
98
/*
 * Write @val into the field of @entry's generic address register
 * selected by @entry->mask and the region's bit_offset.  With
 * APEI_EXEC_PRESERVE_REGISTER set, bits outside the field are read
 * back first and preserved (read-modify-write); otherwise they are
 * written as zero.
 */
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;

		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		/* Keep every bit outside the field being written. */
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}
117
/* WRITE_REGISTER instruction: write @ctx->value to @entry's register. */
int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);
124
/*
 * WRITE_REGISTER_VALUE instruction: write the table-provided
 * @entry->value (not the caller's ctx->value) to the register, by
 * stashing it in the context and reusing the plain write path.
 */
int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	ctx->value = entry->value;

	return apei_exec_write_register(ctx, entry);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
133
/* NOOP instruction: succeed without touching the context or hardware. */
int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);
140
/*
 * Interpret the specified action. Go through whole action table,
 * execute all instructions belong to the action.
 *
 * Returns 0 on success.  If @optional is false and no entry for
 * @action was executed successfully (rc stays negative), the error
 * code is returned instead.
 */
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of current instruction,
	 * "ctx->ip" specifies the next instruction to executed,
	 * instruction "run" function may change the "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		/* Only entries belonging to the requested action count. */
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warn(FW_WARN APEI_PFX
					"Invalid action table, unknown instruction type: %d\n",
					entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		/* A backward jump requires rescanning from the table top. */
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);
190
191typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
192 struct acpi_whea_header *entry,
193 void *data);
194
195static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
196 apei_exec_entry_func_t func,
197 void *data,
198 int *end)
199{
200 u8 ins;
201 int i, rc;
202 struct acpi_whea_header *entry;
203 struct apei_exec_ins_type *ins_table = ctx->ins_table;
204
205 for (i = 0; i < ctx->entries; i++) {
206 entry = ctx->action_table + i;
207 ins = entry->instruction;
208 if (end)
209 *end = i;
210 if (ins >= ctx->instructions || !ins_table[ins].run) {
211 pr_warn(FW_WARN APEI_PFX
212 "Invalid action table, unknown instruction type: %d\n",
213 ins);
214 return -EINVAL;
215 }
216 rc = func(ctx, entry, data);
217 if (rc)
218 return rc;
219 }
220
221 return 0;
222}
223
/* Map the register region of entries whose instruction accesses a GAR. */
static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return apei_map_generic_address(&entry->register_region);

	return 0;
}
235
/*
 * Pre-map all GARs in action table to make it possible to access them
 * in NMI handler.
 *
 * On failure, everything mapped so far is unmapped again by replaying
 * the table entries preceding the failing one with a truncated copy
 * of the context.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;

		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		/* "end" is the failing index; unmap entries [0, end). */
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
256
/* Unmap the register region of entries whose instruction accesses a GAR. */
static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		apei_unmap_generic_address(&entry->register_region);

	return 0;
}
268
/* Post-unmap all GARs in the action table (reverse of pre-map). */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
276
/*
 * Resource management for GARs in APEI
 */

/* A half-open [start, end) address range on one of the resource lists. */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
static struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};
291
/*
 * Insert [start, start + size) into @res_list, merging it with any
 * existing ranges it overlaps or abuts, so the list never contains
 * overlapping entries.  Returns 0 on success or -ENOMEM.
 */
static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	/* Empty range: nothing to record. */
	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			/* Fully contained in an existing range. */
			kfree(res_ins);
			return 0;
		}
		/*
		 * Partial overlap: absorb the existing node into the
		 * range being built and rescan from the top, since the
		 * enlarged range may now touch further entries.  One
		 * absorbed node is recycled as res_ins; extras are freed.
		 */
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}
329
/*
 * Remove from @res_list1 every address covered by @res_list2 (set
 * subtraction: list1 -= list2).  Ranges in list1 may be trimmed,
 * deleted, or split in two.  Returns 0 on success or -ENOMEM when a
 * split needs a new node.
 */
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;

	/* Manual safe-iteration cursor: resn1 always trails one ahead. */
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;	/* no overlap */
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				/* res1 fully covered: drop it. */
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				/* res2 punches a hole: split res1 in two. */
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				/* Overlap at one end only: trim res1. */
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}
369
370static void apei_res_clean(struct list_head *res_list)
371{
372 struct apei_res *res, *resn;
373
374 list_for_each_entry_safe(res, resn, res_list, list) {
375 list_del(&res->list);
376 kfree(res);
377 }
378}
379
/* Free every range recorded in @resources (both iomem and ioport lists). */
void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);
386
387static int apei_resources_merge(struct apei_resources *resources1,
388 struct apei_resources *resources2)
389{
390 int rc;
391 struct apei_res *res;
392
393 list_for_each_entry(res, &resources2->iomem, list) {
394 rc = apei_res_add(&resources1->iomem, res->start,
395 res->end - res->start);
396 if (rc)
397 return rc;
398 }
399 list_for_each_entry(res, &resources2->ioport, list) {
400 rc = apei_res_add(&resources1->ioport, res->start,
401 res->end - res->start);
402 if (rc)
403 return rc;
404 }
405
406 return 0;
407}
408
409int apei_resources_add(struct apei_resources *resources,
410 unsigned long start, unsigned long size,
411 bool iomem)
412{
413 if (iomem)
414 return apei_res_add(&resources->iomem, start, size);
415 else
416 return apei_res_add(&resources->ioport, start, size);
417}
418EXPORT_SYMBOL_GPL(apei_resources_add);
419
420/*
421 * EINJ has two groups of GARs (EINJ table entry and trigger table
422 * entry), so common resources are subtracted from the trigger table
423 * resources before the second requesting.
424 */
425int apei_resources_sub(struct apei_resources *resources1,
426 struct apei_resources *resources2)
427{
428 int rc;
429
430 rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
431 if (rc)
432 return rc;
433 return apei_res_sub(&resources1->ioport, &resources2->ioport);
434}
435EXPORT_SYMBOL_GPL(apei_resources_sub);
436
/* Region-enumeration callback: record [start, start+size) as iomem. */
static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;

	return apei_res_add(&resources->iomem, start, size);
}
442
/*
 * Collect all ACPI NVS regions as iomem ranges, so the caller can
 * exclude them from what APEI requests.
 */
static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}
447
/*
 * Optional hook set by architecture code (may be NULL): enumerates
 * address ranges that apei_resources_request() subtracts from the
 * ranges APEI claims.
 */
int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
					 void *data), void *data);

/* Collect the arch-filtered ranges; only called when the hook is set. */
static int apei_get_arch_resources(struct apei_resources *resources)
{
	return arch_apei_filter_addr(apei_get_res_callback, resources);
}
455
/*
 * IO memory/port resource management mechanism is used to check
 * whether memory/port area used by GARs conflicts with normal memory
 * or IO memory/port of devices.
 *
 * On success the requested ranges are merged into apei_resources_all;
 * on failure every region requested so far is released again via the
 * goto ladder at the bottom.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	/* Drop anything APEI has already claimed in an earlier request. */
	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses ACPI NVS region, that has been marked as
	 * busy, so exclude it from APEI resources to avoid false
	 * conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	/* Likewise exclude ranges the architecture filters out. */
	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
			       "Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			/* res_bak marks the first region NOT obtained. */
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
			       "Can not request [io %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	goto arch_res_fini;	/* success path: rc == 0 */

err_unmap_ioport:
	/* Release the ports obtained before the failure point. */
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	/* The whole iomem list was requested, so release all of it. */
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
551
/*
 * Undo apei_resources_request(): release all regions of @resources and
 * remove them from the global "already claimed" record.
 */
void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);
567
/*
 * Validate a Generic Address Register and compute its physical address
 * (@paddr) and access width in bits (@access_bit_width).  Rejects a
 * zero address, an access size code outside 1..4, a bit field that
 * does not fit the access width, and address spaces other than system
 * memory and system I/O.
 */
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
			  u32 *access_bit_width)
{
	u32 bit_width, bit_offset, access_size_code, space_id;

	bit_width = reg->bit_width;
	bit_offset = reg->bit_offset;
	access_size_code = reg->access_width;
	space_id = reg->space_id;
	/* The GAR may sit unaligned inside the firmware table. */
	*paddr = get_unaligned(&reg->address);
	if (!*paddr) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (access_size_code < 1 || access_size_code > 4) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}
	/* Size codes 1..4 map to 8/16/32/64-bit accesses. */
	*access_bit_width = 1UL << (access_size_code + 2);

	/* Fixup common BIOS bug: aligned 32/64-bit register described
	 * with a smaller access size — widen the access. */
	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
	    *access_bit_width < 32)
		*access_bit_width = 32;
	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
		 *access_bit_width < 64)
		*access_bit_width = 64;

	if ((bit_width + bit_offset) > *access_bit_width) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	return 0;
}
622
623int apei_map_generic_address(struct acpi_generic_address *reg)
624{
625 int rc;
626 u32 access_bit_width;
627 u64 address;
628
629 rc = apei_check_gar(reg, &address, &access_bit_width);
630 if (rc)
631 return rc;
632
633 /* IO space doesn't need mapping */
634 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
635 return 0;
636
637 if (!acpi_os_map_generic_address(reg))
638 return -ENXIO;
639
640 return 0;
641}
642EXPORT_SYMBOL_GPL(apei_map_generic_address);
643
/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					     val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * NOTE(review): the (u32 *) cast stores the port value in
		 * the first 32 bits of *val, which assumes little-endian
		 * layout of the u64 — confirm for big-endian targets.
		 */
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		/* Defensive: apei_check_gar() already rejects other spaces. */
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);
677
/* write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address) address,
					      val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		/* Defensive: apei_check_gar() already rejects other spaces. */
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);
709
/*
 * apei_exec_for_each_entry() callback: record the address range
 * (physical address, access width in bytes) of every register-accessing
 * instruction into the appropriate resource list.
 */
static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u32 access_bit_width;
	u64 paddr;
	int rc;

	/* Instructions that touch no register contribute no resources. */
	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    access_bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    access_bit_width / 8);
	default:
		return -EINVAL;
	}
}
739
/*
 * Same register may be used by multiple instructions in GARs, so
 * resources are collected before requesting.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
751
/*
 * Return the shared "apei" debugfs directory, creating it on first use.
 *
 * NOTE(review): the static pointer is checked and set without locking;
 * appears to rely on early callers being serialized — confirm against
 * the call sites before adding new ones from concurrent contexts.
 */
struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
762
/*
 * Weak stub for the architecture hook enabling firmware-first CMC
 * handling; the default simply returns 1.  It takes a HEST header, so
 * presumably it is invoked while parsing HEST — verify at call sites.
 */
int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);
769
/* Weak no-op stub; architectures may override to report memory errors. */
void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);
775
/*
 * Evaluate _OSC under \_SB with the WHEA UUID, advertising support
 * (support dword = 1) and requesting no control bits.  Returns 0 on
 * success, or -EIO if \_SB cannot be found or _OSC evaluation fails.
 */
int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str = whea_uuid_str,
		.rev = 1,
		.cap.length = sizeof(capbuf),
		.cap.pointer = capbuf,
	};

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 1;
	capbuf[OSC_CONTROL_DWORD] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;
	else {
		/* The returned capability buffer is not used; free it. */
		kfree(context.ret.pointer);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(apei_osc_setup);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
4 * infrastructure
5 *
 * APEI allows to report errors (for example from the chipset) to
7 * the operating system. This improves NMI handling especially. In
8 * addition it supports error serialization and error injection.
9 *
10 * For more information about APEI, please refer to ACPI Specification
11 * version 4.0, chapter 17.
12 *
13 * This file has Common functions used by more than one APEI table,
14 * including framework of interpreter for ERST and EINJ; resource
15 * management for APEI registers.
16 *
17 * Copyright (C) 2009, Intel Corp.
18 * Author: Huang Ying <ying.huang@intel.com>
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/acpi.h>
25#include <linux/slab.h>
26#include <linux/io.h>
27#include <linux/kref.h>
28#include <linux/rculist.h>
29#include <linux/interrupt.h>
30#include <linux/debugfs.h>
31#include <asm/unaligned.h>
32
33#include "apei-internal.h"
34
35#define APEI_PFX "APEI: "
36
37/*
38 * APEI ERST (Error Record Serialization Table) and EINJ (Error
39 * INJection) interpreter framework.
40 */
41
42#define APEI_EXEC_PRESERVE_REGISTER 0x1
43
44void apei_exec_ctx_init(struct apei_exec_context *ctx,
45 struct apei_exec_ins_type *ins_table,
46 u32 instructions,
47 struct acpi_whea_header *action_table,
48 u32 entries)
49{
50 ctx->ins_table = ins_table;
51 ctx->instructions = instructions;
52 ctx->action_table = action_table;
53 ctx->entries = entries;
54}
55EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
56
57int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
58{
59 int rc;
60
61 rc = apei_read(val, &entry->register_region);
62 if (rc)
63 return rc;
64 *val >>= entry->register_region.bit_offset;
65 *val &= entry->mask;
66
67 return 0;
68}
69
70int apei_exec_read_register(struct apei_exec_context *ctx,
71 struct acpi_whea_header *entry)
72{
73 int rc;
74 u64 val = 0;
75
76 rc = __apei_exec_read_register(entry, &val);
77 if (rc)
78 return rc;
79 ctx->value = val;
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(apei_exec_read_register);
84
85int apei_exec_read_register_value(struct apei_exec_context *ctx,
86 struct acpi_whea_header *entry)
87{
88 int rc;
89
90 rc = apei_exec_read_register(ctx, entry);
91 if (rc)
92 return rc;
93 ctx->value = (ctx->value == entry->value);
94
95 return 0;
96}
97EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
98
99int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
100{
101 int rc;
102
103 val &= entry->mask;
104 val <<= entry->register_region.bit_offset;
105 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
106 u64 valr = 0;
107 rc = apei_read(&valr, &entry->register_region);
108 if (rc)
109 return rc;
110 valr &= ~(entry->mask << entry->register_region.bit_offset);
111 val |= valr;
112 }
113 rc = apei_write(val, &entry->register_region);
114
115 return rc;
116}
117
118int apei_exec_write_register(struct apei_exec_context *ctx,
119 struct acpi_whea_header *entry)
120{
121 return __apei_exec_write_register(entry, ctx->value);
122}
123EXPORT_SYMBOL_GPL(apei_exec_write_register);
124
125int apei_exec_write_register_value(struct apei_exec_context *ctx,
126 struct acpi_whea_header *entry)
127{
128 int rc;
129
130 ctx->value = entry->value;
131 rc = apei_exec_write_register(ctx, entry);
132
133 return rc;
134}
135EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
136
137int apei_exec_noop(struct apei_exec_context *ctx,
138 struct acpi_whea_header *entry)
139{
140 return 0;
141}
142EXPORT_SYMBOL_GPL(apei_exec_noop);
143
144/*
145 * Interpret the specified action. Go through whole action table,
146 * execute all instructions belong to the action.
147 */
148int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
149 bool optional)
150{
151 int rc = -ENOENT;
152 u32 i, ip;
153 struct acpi_whea_header *entry;
154 apei_exec_ins_func_t run;
155
156 ctx->ip = 0;
157
158 /*
159 * "ip" is the instruction pointer of current instruction,
160 * "ctx->ip" specifies the next instruction to executed,
161 * instruction "run" function may change the "ctx->ip" to
162 * implement "goto" semantics.
163 */
164rewind:
165 ip = 0;
166 for (i = 0; i < ctx->entries; i++) {
167 entry = &ctx->action_table[i];
168 if (entry->action != action)
169 continue;
170 if (ip == ctx->ip) {
171 if (entry->instruction >= ctx->instructions ||
172 !ctx->ins_table[entry->instruction].run) {
173 pr_warn(FW_WARN APEI_PFX
174 "Invalid action table, unknown instruction type: %d\n",
175 entry->instruction);
176 return -EINVAL;
177 }
178 run = ctx->ins_table[entry->instruction].run;
179 rc = run(ctx, entry);
180 if (rc < 0)
181 return rc;
182 else if (rc != APEI_EXEC_SET_IP)
183 ctx->ip++;
184 }
185 ip++;
186 if (ctx->ip < ip)
187 goto rewind;
188 }
189
190 return !optional && rc < 0 ? rc : 0;
191}
192EXPORT_SYMBOL_GPL(__apei_exec_run);
193
194typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
195 struct acpi_whea_header *entry,
196 void *data);
197
198static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
199 apei_exec_entry_func_t func,
200 void *data,
201 int *end)
202{
203 u8 ins;
204 int i, rc;
205 struct acpi_whea_header *entry;
206 struct apei_exec_ins_type *ins_table = ctx->ins_table;
207
208 for (i = 0; i < ctx->entries; i++) {
209 entry = ctx->action_table + i;
210 ins = entry->instruction;
211 if (end)
212 *end = i;
213 if (ins >= ctx->instructions || !ins_table[ins].run) {
214 pr_warn(FW_WARN APEI_PFX
215 "Invalid action table, unknown instruction type: %d\n",
216 ins);
217 return -EINVAL;
218 }
219 rc = func(ctx, entry, data);
220 if (rc)
221 return rc;
222 }
223
224 return 0;
225}
226
227static int pre_map_gar_callback(struct apei_exec_context *ctx,
228 struct acpi_whea_header *entry,
229 void *data)
230{
231 u8 ins = entry->instruction;
232
233 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
234 return apei_map_generic_address(&entry->register_region);
235
236 return 0;
237}
238
239/*
240 * Pre-map all GARs in action table to make it possible to access them
241 * in NMI handler.
242 */
243int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
244{
245 int rc, end;
246
247 rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
248 NULL, &end);
249 if (rc) {
250 struct apei_exec_context ctx_unmap;
251 memcpy(&ctx_unmap, ctx, sizeof(*ctx));
252 ctx_unmap.entries = end;
253 apei_exec_post_unmap_gars(&ctx_unmap);
254 }
255
256 return rc;
257}
258EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
259
260static int post_unmap_gar_callback(struct apei_exec_context *ctx,
261 struct acpi_whea_header *entry,
262 void *data)
263{
264 u8 ins = entry->instruction;
265
266 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
267 apei_unmap_generic_address(&entry->register_region);
268
269 return 0;
270}
271
272/* Post-unmap all GAR in action table. */
273int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
274{
275 return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
276 NULL, NULL);
277}
278EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
279
280/*
281 * Resource management for GARs in APEI
282 */
283struct apei_res {
284 struct list_head list;
285 unsigned long start;
286 unsigned long end;
287};
288
289/* Collect all resources requested, to avoid conflict */
290static struct apei_resources apei_resources_all = {
291 .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
292 .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
293};
294
295static int apei_res_add(struct list_head *res_list,
296 unsigned long start, unsigned long size)
297{
298 struct apei_res *res, *resn, *res_ins = NULL;
299 unsigned long end = start + size;
300
301 if (end <= start)
302 return 0;
303repeat:
304 list_for_each_entry_safe(res, resn, res_list, list) {
305 if (res->start > end || res->end < start)
306 continue;
307 else if (end <= res->end && start >= res->start) {
308 kfree(res_ins);
309 return 0;
310 }
311 list_del(&res->list);
312 res->start = start = min(res->start, start);
313 res->end = end = max(res->end, end);
314 kfree(res_ins);
315 res_ins = res;
316 goto repeat;
317 }
318
319 if (res_ins)
320 list_add(&res_ins->list, res_list);
321 else {
322 res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
323 if (!res_ins)
324 return -ENOMEM;
325 res_ins->start = start;
326 res_ins->end = end;
327 list_add(&res_ins->list, res_list);
328 }
329
330 return 0;
331}
332
333static int apei_res_sub(struct list_head *res_list1,
334 struct list_head *res_list2)
335{
336 struct apei_res *res1, *resn1, *res2, *res;
337 res1 = list_entry(res_list1->next, struct apei_res, list);
338 resn1 = list_entry(res1->list.next, struct apei_res, list);
339 while (&res1->list != res_list1) {
340 list_for_each_entry(res2, res_list2, list) {
341 if (res1->start >= res2->end ||
342 res1->end <= res2->start)
343 continue;
344 else if (res1->end <= res2->end &&
345 res1->start >= res2->start) {
346 list_del(&res1->list);
347 kfree(res1);
348 break;
349 } else if (res1->end > res2->end &&
350 res1->start < res2->start) {
351 res = kmalloc(sizeof(*res), GFP_KERNEL);
352 if (!res)
353 return -ENOMEM;
354 res->start = res2->end;
355 res->end = res1->end;
356 res1->end = res2->start;
357 list_add(&res->list, &res1->list);
358 resn1 = res;
359 } else {
360 if (res1->start < res2->start)
361 res1->end = res2->start;
362 else
363 res1->start = res2->end;
364 }
365 }
366 res1 = resn1;
367 resn1 = list_entry(resn1->list.next, struct apei_res, list);
368 }
369
370 return 0;
371}
372
373static void apei_res_clean(struct list_head *res_list)
374{
375 struct apei_res *res, *resn;
376
377 list_for_each_entry_safe(res, resn, res_list, list) {
378 list_del(&res->list);
379 kfree(res);
380 }
381}
382
383void apei_resources_fini(struct apei_resources *resources)
384{
385 apei_res_clean(&resources->iomem);
386 apei_res_clean(&resources->ioport);
387}
388EXPORT_SYMBOL_GPL(apei_resources_fini);
389
390static int apei_resources_merge(struct apei_resources *resources1,
391 struct apei_resources *resources2)
392{
393 int rc;
394 struct apei_res *res;
395
396 list_for_each_entry(res, &resources2->iomem, list) {
397 rc = apei_res_add(&resources1->iomem, res->start,
398 res->end - res->start);
399 if (rc)
400 return rc;
401 }
402 list_for_each_entry(res, &resources2->ioport, list) {
403 rc = apei_res_add(&resources1->ioport, res->start,
404 res->end - res->start);
405 if (rc)
406 return rc;
407 }
408
409 return 0;
410}
411
412int apei_resources_add(struct apei_resources *resources,
413 unsigned long start, unsigned long size,
414 bool iomem)
415{
416 if (iomem)
417 return apei_res_add(&resources->iomem, start, size);
418 else
419 return apei_res_add(&resources->ioport, start, size);
420}
421EXPORT_SYMBOL_GPL(apei_resources_add);
422
423/*
424 * EINJ has two groups of GARs (EINJ table entry and trigger table
425 * entry), so common resources are subtracted from the trigger table
426 * resources before the second requesting.
427 */
428int apei_resources_sub(struct apei_resources *resources1,
429 struct apei_resources *resources2)
430{
431 int rc;
432
433 rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
434 if (rc)
435 return rc;
436 return apei_res_sub(&resources1->ioport, &resources2->ioport);
437}
438EXPORT_SYMBOL_GPL(apei_resources_sub);
439
440static int apei_get_res_callback(__u64 start, __u64 size, void *data)
441{
442 struct apei_resources *resources = data;
443 return apei_res_add(&resources->iomem, start, size);
444}
445
/*
 * Collect all ACPI NVS regions into @resources (as iomem ranges) so
 * they can be excluded from APEI resource requests.
 */
static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}
450
451int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
452 void *data), void *data);
453static int apei_get_arch_resources(struct apei_resources *resources)
454
455{
456 return arch_apei_filter_addr(apei_get_res_callback, resources);
457}
458
459/*
460 * IO memory/port resource management mechanism is used to check
461 * whether memory/port area used by GARs conflicts with normal memory
462 * or IO memory/port of devices.
463 */
464int apei_resources_request(struct apei_resources *resources,
465 const char *desc)
466{
467 struct apei_res *res, *res_bak = NULL;
468 struct resource *r;
469 struct apei_resources nvs_resources, arch_res;
470 int rc;
471
472 rc = apei_resources_sub(resources, &apei_resources_all);
473 if (rc)
474 return rc;
475
476 /*
477 * Some firmware uses ACPI NVS region, that has been marked as
478 * busy, so exclude it from APEI resources to avoid false
479 * conflict.
480 */
481 apei_resources_init(&nvs_resources);
482 rc = apei_get_nvs_resources(&nvs_resources);
483 if (rc)
484 goto nvs_res_fini;
485 rc = apei_resources_sub(resources, &nvs_resources);
486 if (rc)
487 goto nvs_res_fini;
488
489 if (arch_apei_filter_addr) {
490 apei_resources_init(&arch_res);
491 rc = apei_get_arch_resources(&arch_res);
492 if (rc)
493 goto arch_res_fini;
494 rc = apei_resources_sub(resources, &arch_res);
495 if (rc)
496 goto arch_res_fini;
497 }
498
499 rc = -EINVAL;
500 list_for_each_entry(res, &resources->iomem, list) {
501 r = request_mem_region(res->start, res->end - res->start,
502 desc);
503 if (!r) {
504 pr_err(APEI_PFX
505 "Can not request [mem %#010llx-%#010llx] for %s registers\n",
506 (unsigned long long)res->start,
507 (unsigned long long)res->end - 1, desc);
508 res_bak = res;
509 goto err_unmap_iomem;
510 }
511 }
512
513 list_for_each_entry(res, &resources->ioport, list) {
514 r = request_region(res->start, res->end - res->start, desc);
515 if (!r) {
516 pr_err(APEI_PFX
517 "Can not request [io %#06llx-%#06llx] for %s registers\n",
518 (unsigned long long)res->start,
519 (unsigned long long)res->end - 1, desc);
520 res_bak = res;
521 goto err_unmap_ioport;
522 }
523 }
524
525 rc = apei_resources_merge(&apei_resources_all, resources);
526 if (rc) {
527 pr_err(APEI_PFX "Fail to merge resources!\n");
528 goto err_unmap_ioport;
529 }
530
531 goto arch_res_fini;
532
533err_unmap_ioport:
534 list_for_each_entry(res, &resources->ioport, list) {
535 if (res == res_bak)
536 break;
537 release_region(res->start, res->end - res->start);
538 }
539 res_bak = NULL;
540err_unmap_iomem:
541 list_for_each_entry(res, &resources->iomem, list) {
542 if (res == res_bak)
543 break;
544 release_mem_region(res->start, res->end - res->start);
545 }
546arch_res_fini:
547 if (arch_apei_filter_addr)
548 apei_resources_fini(&arch_res);
549nvs_res_fini:
550 apei_resources_fini(&nvs_resources);
551 return rc;
552}
553EXPORT_SYMBOL_GPL(apei_resources_request);
554
/*
 * Release every I/O memory and I/O port region previously requested
 * via apei_resources_request(), and remove the ranges from the global
 * apei_resources_all bookkeeping list.
 */
void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	/* Forget these ranges so future requesters may claim them again. */
	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);
570
571static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
572 u32 *access_bit_width)
573{
574 u32 bit_width, bit_offset, access_size_code, space_id;
575
576 bit_width = reg->bit_width;
577 bit_offset = reg->bit_offset;
578 access_size_code = reg->access_width;
579 space_id = reg->space_id;
580 *paddr = get_unaligned(®->address);
581 if (!*paddr) {
582 pr_warn(FW_BUG APEI_PFX
583 "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
584 *paddr, bit_width, bit_offset, access_size_code,
585 space_id);
586 return -EINVAL;
587 }
588
589 if (access_size_code < 1 || access_size_code > 4) {
590 pr_warn(FW_BUG APEI_PFX
591 "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
592 *paddr, bit_width, bit_offset, access_size_code,
593 space_id);
594 return -EINVAL;
595 }
596 *access_bit_width = 1UL << (access_size_code + 2);
597
598 /* Fixup common BIOS bug */
599 if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
600 *access_bit_width < 32)
601 *access_bit_width = 32;
602 else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
603 *access_bit_width < 64)
604 *access_bit_width = 64;
605
606 if ((bit_width + bit_offset) > *access_bit_width) {
607 pr_warn(FW_BUG APEI_PFX
608 "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
609 *paddr, bit_width, bit_offset, access_size_code,
610 space_id);
611 return -EINVAL;
612 }
613
614 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
615 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
616 pr_warn(FW_BUG APEI_PFX
617 "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
618 *paddr, bit_width, bit_offset, access_size_code,
619 space_id);
620 return -EINVAL;
621 }
622
623 return 0;
624}
625
626int apei_map_generic_address(struct acpi_generic_address *reg)
627{
628 int rc;
629 u32 access_bit_width;
630 u64 address;
631
632 rc = apei_check_gar(reg, &address, &access_bit_width);
633 if (rc)
634 return rc;
635
636 /* IO space doesn't need mapping */
637 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
638 return 0;
639
640 if (!acpi_os_map_generic_address(reg))
641 return -ENXIO;
642
643 return 0;
644}
645EXPORT_SYMBOL_GPL(apei_map_generic_address);
646
647/* read GAR in interrupt (including NMI) or process context */
648int apei_read(u64 *val, struct acpi_generic_address *reg)
649{
650 int rc;
651 u32 access_bit_width;
652 u64 address;
653 acpi_status status;
654
655 rc = apei_check_gar(reg, &address, &access_bit_width);
656 if (rc)
657 return rc;
658
659 *val = 0;
660 switch(reg->space_id) {
661 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
662 status = acpi_os_read_memory((acpi_physical_address) address,
663 val, access_bit_width);
664 if (ACPI_FAILURE(status))
665 return -EIO;
666 break;
667 case ACPI_ADR_SPACE_SYSTEM_IO:
668 status = acpi_os_read_port(address, (u32 *)val,
669 access_bit_width);
670 if (ACPI_FAILURE(status))
671 return -EIO;
672 break;
673 default:
674 return -EINVAL;
675 }
676
677 return 0;
678}
679EXPORT_SYMBOL_GPL(apei_read);
680
681/* write GAR in interrupt (including NMI) or process context */
682int apei_write(u64 val, struct acpi_generic_address *reg)
683{
684 int rc;
685 u32 access_bit_width;
686 u64 address;
687 acpi_status status;
688
689 rc = apei_check_gar(reg, &address, &access_bit_width);
690 if (rc)
691 return rc;
692
693 switch (reg->space_id) {
694 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
695 status = acpi_os_write_memory((acpi_physical_address) address,
696 val, access_bit_width);
697 if (ACPI_FAILURE(status))
698 return -EIO;
699 break;
700 case ACPI_ADR_SPACE_SYSTEM_IO:
701 status = acpi_os_write_port(address, val, access_bit_width);
702 if (ACPI_FAILURE(status))
703 return -EIO;
704 break;
705 default:
706 return -EINVAL;
707 }
708
709 return 0;
710}
711EXPORT_SYMBOL_GPL(apei_write);
712
713static int collect_res_callback(struct apei_exec_context *ctx,
714 struct acpi_whea_header *entry,
715 void *data)
716{
717 struct apei_resources *resources = data;
718 struct acpi_generic_address *reg = &entry->register_region;
719 u8 ins = entry->instruction;
720 u32 access_bit_width;
721 u64 paddr;
722 int rc;
723
724 if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
725 return 0;
726
727 rc = apei_check_gar(reg, &paddr, &access_bit_width);
728 if (rc)
729 return rc;
730
731 switch (reg->space_id) {
732 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
733 return apei_res_add(&resources->iomem, paddr,
734 access_bit_width / 8);
735 case ACPI_ADR_SPACE_SYSTEM_IO:
736 return apei_res_add(&resources->ioport, paddr,
737 access_bit_width / 8);
738 default:
739 return -EINVAL;
740 }
741}
742
743/*
744 * Same register may be used by multiple instructions in GARs, so
745 * resources are collected before requesting.
746 */
747int apei_exec_collect_resources(struct apei_exec_context *ctx,
748 struct apei_resources *resources)
749{
750 return apei_exec_for_each_entry(ctx, collect_res_callback,
751 resources, NULL);
752}
753EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
754
755struct dentry *apei_get_debugfs_dir(void)
756{
757 static struct dentry *dapei;
758
759 if (!dapei)
760 dapei = debugfs_create_dir("apei", NULL);
761
762 return dapei;
763}
764EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
765
/*
 * Weak default for the per-architecture CMC firmware-first hook.
 * Architectures with firmware-first corrected-error support override
 * this.  NOTE(review): the default returns 1 — confirm the meaning of
 * the return value against the HEST-parsing caller.
 */
int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);
772
/*
 * Weak no-op default: architectures may override this to forward
 * memory error records to platform-specific handlers.
 */
void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);
778
779int apei_osc_setup(void)
780{
781 static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
782 acpi_handle handle;
783 u32 capbuf[3];
784 struct acpi_osc_context context = {
785 .uuid_str = whea_uuid_str,
786 .rev = 1,
787 .cap.length = sizeof(capbuf),
788 .cap.pointer = capbuf,
789 };
790
791 capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
792 capbuf[OSC_SUPPORT_DWORD] = 1;
793 capbuf[OSC_CONTROL_DWORD] = 0;
794
795 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
796 || ACPI_FAILURE(acpi_run_osc(handle, &context)))
797 return -EIO;
798 else {
799 kfree(context.ret.pointer);
800 return 0;
801 }
802}
803EXPORT_SYMBOL_GPL(apei_osc_setup);