1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * efi.c - EFI subsystem
4 *
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8 *
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
11 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kobject.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/debugfs.h>
21#include <linux/device.h>
22#include <linux/efi.h>
23#include <linux/of.h>
24#include <linux/initrd.h>
25#include <linux/io.h>
26#include <linux/kexec.h>
27#include <linux/platform_device.h>
28#include <linux/random.h>
29#include <linux/reboot.h>
30#include <linux/slab.h>
31#include <linux/acpi.h>
32#include <linux/ucs2_string.h>
33#include <linux/memblock.h>
34#include <linux/security.h>
35#include <linux/notifier.h>
36
37#include <asm/early_ioremap.h>
38
39struct efi __read_mostly efi = {
40 .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
41 .acpi = EFI_INVALID_TABLE_ADDR,
42 .acpi20 = EFI_INVALID_TABLE_ADDR,
43 .smbios = EFI_INVALID_TABLE_ADDR,
44 .smbios3 = EFI_INVALID_TABLE_ADDR,
45 .esrt = EFI_INVALID_TABLE_ADDR,
46 .tpm_log = EFI_INVALID_TABLE_ADDR,
47 .tpm_final_log = EFI_INVALID_TABLE_ADDR,
48#ifdef CONFIG_LOAD_UEFI_KEYS
49 .mokvar_table = EFI_INVALID_TABLE_ADDR,
50#endif
51#ifdef CONFIG_EFI_COCO_SECRET
52 .coco_secret = EFI_INVALID_TABLE_ADDR,
53#endif
54#ifdef CONFIG_UNACCEPTED_MEMORY
55 .unaccepted = EFI_INVALID_TABLE_ADDR,
56#endif
57};
58EXPORT_SYMBOL(efi);
59
60unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
61static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
62static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
63static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
64
65extern unsigned long screen_info_table;
66
67struct mm_struct efi_mm = {
68 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
69 .mm_users = ATOMIC_INIT(2),
70 .mm_count = ATOMIC_INIT(1),
71 .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
72 MMAP_LOCK_INITIALIZER(efi_mm)
73 .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
74 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
75 .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
76};
77
78struct workqueue_struct *efi_rts_wq;
79
80static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
81static int __init setup_noefi(char *arg)
82{
83 disable_runtime = true;
84 return 0;
85}
86early_param("noefi", setup_noefi);
87
88bool efi_runtime_disabled(void)
89{
90 return disable_runtime;
91}
92
93bool __pure __efi_soft_reserve_enabled(void)
94{
95 return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
96}
97
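/*
 * Handle the "efi=" kernel command line option.  Options are comma
 * separated, e.g. "efi=debug,nosoftreserve" (illustrative):
 *   debug         - set EFI_DBG for additional EFI debug output
 *   noruntime     - disable EFI runtime services
 *   runtime       - force-enable EFI runtime services
 *   nosoftreserve - ignore the EFI_MEMORY_SP soft-reservation attribute
 */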
98static int __init parse_efi_cmdline(char *str)
99{
100 if (!str) {
101 pr_warn("need at least one option\n");
102 return -EINVAL;
103 }
104
105 if (parse_option_str(str, "debug"))
106 set_bit(EFI_DBG, &efi.flags);
107
108 if (parse_option_str(str, "noruntime"))
109 disable_runtime = true;
110
111 if (parse_option_str(str, "runtime"))
112 disable_runtime = false;
113
114 if (parse_option_str(str, "nosoftreserve"))
115 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
116
117 return 0;
118}
119early_param("efi", parse_efi_cmdline);
120
121struct kobject *efi_kobj;
122
123/*
124 * Keep exporting the systab information that historically snuck into
125 * the efivars driver.
126 * Note: do not add more fields to the systab sysfs file, as that breaks
127 * the sysfs one-value-per-file rule!
128 */
129static ssize_t systab_show(struct kobject *kobj,
130 struct kobj_attribute *attr, char *buf)
131{
132 char *str = buf;
133
134 if (!kobj || !buf)
135 return -EINVAL;
136
137 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
138 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
139 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
140 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
141 /*
142 * If both SMBIOS and SMBIOS3 entry points are implemented, the
143 * SMBIOS3 entry point shall be preferred, so we list it first to
144 * let applications stop parsing after the first match.
145 */
146 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
147 str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
148 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
149 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
150
151 if (IS_ENABLED(CONFIG_X86))
152 str = efi_systab_show_arch(str);
153
154 return str - buf;
155}
156
157static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
158
159static ssize_t fw_platform_size_show(struct kobject *kobj,
160 struct kobj_attribute *attr, char *buf)
161{
162 return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
163}
164
165extern __weak struct kobj_attribute efi_attr_fw_vendor;
166extern __weak struct kobj_attribute efi_attr_runtime;
167extern __weak struct kobj_attribute efi_attr_config_table;
168static struct kobj_attribute efi_attr_fw_platform_size =
169 __ATTR_RO(fw_platform_size);
170
171static struct attribute *efi_subsys_attrs[] = {
172 &efi_attr_systab.attr,
173 &efi_attr_fw_platform_size.attr,
174 &efi_attr_fw_vendor.attr,
175 &efi_attr_runtime.attr,
176 &efi_attr_config_table.attr,
177 NULL,
178};
179
180umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
181 int n)
182{
183 return attr->mode;
184}
185
186static const struct attribute_group efi_subsys_attr_group = {
187 .attrs = efi_subsys_attrs,
188 .is_visible = efi_attr_is_visible,
189};
190
191struct blocking_notifier_head efivar_ops_nh;
192EXPORT_SYMBOL_GPL(efivar_ops_nh);
193
194static struct efivars generic_efivars;
195static struct efivar_operations generic_ops;
196
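/*
 * Probe whether the firmware actually implements the variable services:
 * issue a GetNextVariableName() call with a minimal (single character)
 * buffer and treat EFI_UNSUPPORTED as "no variable store available".
 * Any other status, including EFI_BUFFER_TOO_SMALL, means the service
 * is present.
 */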
197static bool generic_ops_supported(void)
198{
199 unsigned long name_size;
200 efi_status_t status;
201 efi_char16_t name;
202 efi_guid_t guid;
203
204 name_size = sizeof(name);
205
206 if (!efi.get_next_variable)
207 return false;
208 status = efi.get_next_variable(&name_size, &name, &guid);
209 if (status == EFI_UNSUPPORTED)
210 return false;
211
212 return true;
213}
214
215static int generic_ops_register(void)
216{
217 if (!generic_ops_supported())
218 return 0;
219
220 generic_ops.get_variable = efi.get_variable;
221 generic_ops.get_next_variable = efi.get_next_variable;
222 generic_ops.query_variable_store = efi_query_variable_store;
223 generic_ops.query_variable_info = efi.query_variable_info;
224
225 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
226 generic_ops.set_variable = efi.set_variable;
227 generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
228 }
229 return efivars_register(&generic_efivars, &generic_ops);
230}
231
232static void generic_ops_unregister(void)
233{
234 if (!generic_ops.get_variable)
235 return;
236
237 efivars_unregister(&generic_efivars);
238}
239
240void efivars_generic_ops_register(void)
241{
242 generic_ops_register();
243}
244EXPORT_SYMBOL_GPL(efivars_generic_ops_register);
245
246void efivars_generic_ops_unregister(void)
247{
248 generic_ops_unregister();
249}
250EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
251
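/*
 * "efivar_ssdt=<name>" on the kernel command line names an EFI variable
 * whose payload is an ACPI SSDT to load at boot, e.g.
 * "efivar_ssdt=overlay" (variable name purely illustrative).
 * efivar_ssdt_load() walks the variable store and hands every matching
 * variable's data to acpi_load_table().  The option is rejected when the
 * kernel is locked down, since it allows arbitrary ACPI tables to be
 * injected.
 */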
252#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
253#define EFIVAR_SSDT_NAME_MAX 16UL
254static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
255static int __init efivar_ssdt_setup(char *str)
256{
257 int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
258
259 if (ret)
260 return ret;
261
262 if (strlen(str) < sizeof(efivar_ssdt))
263 memcpy(efivar_ssdt, str, strlen(str));
264 else
265 pr_warn("efivar_ssdt: name too long: %s\n", str);
266 return 1;
267}
268__setup("efivar_ssdt=", efivar_ssdt_setup);
269
270static __init int efivar_ssdt_load(void)
271{
272 unsigned long name_size = 256;
273 efi_char16_t *name = NULL;
274 efi_status_t status;
275 efi_guid_t guid;
276
277 if (!efivar_ssdt[0])
278 return 0;
279
280 name = kzalloc(name_size, GFP_KERNEL);
281 if (!name)
282 return -ENOMEM;
283
284 for (;;) {
285 char utf8_name[EFIVAR_SSDT_NAME_MAX];
286 unsigned long data_size = 0;
287 void *data;
288 int limit;
289
290 status = efi.get_next_variable(&name_size, name, &guid);
291 if (status == EFI_NOT_FOUND) {
292 break;
293 } else if (status == EFI_BUFFER_TOO_SMALL) {
294 efi_char16_t *name_tmp =
295 krealloc(name, name_size, GFP_KERNEL);
296 if (!name_tmp) {
297 kfree(name);
298 return -ENOMEM;
299 }
300 name = name_tmp;
301 continue;
302 }
303
304 limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
305 ucs2_as_utf8(utf8_name, name, limit - 1);
306 if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
307 continue;
308
309 pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
310
311 status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
312 if (status != EFI_BUFFER_TOO_SMALL || !data_size)
313 return -EIO;
314
315 data = kmalloc(data_size, GFP_KERNEL);
316 if (!data)
317 return -ENOMEM;
318
319 status = efi.get_variable(name, &guid, NULL, &data_size, data);
320 if (status == EFI_SUCCESS) {
321 acpi_status ret = acpi_load_table(data, NULL);
322 if (ret)
323 pr_err("failed to load table: %u\n", ret);
324 else
325 continue;
326 } else {
327 pr_err("failed to get var data: 0x%lx\n", status);
328 }
329 kfree(data);
330 }
331 return 0;
332}
333#else
334static inline int efivar_ssdt_load(void) { return 0; }
335#endif
336
337#ifdef CONFIG_DEBUG_FS
338
339#define EFI_DEBUGFS_MAX_BLOBS 32
340
341static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
342
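/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under /sys/kernel/debug/efi/ so they can be inspected from
 * userspace.  Only reached when both EFI_DBG and EFI_PRESERVE_BS_REGIONS
 * are set (see efisubsys_init()).
 */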
343static void __init efi_debugfs_init(void)
344{
345 struct dentry *efi_debugfs;
346 efi_memory_desc_t *md;
347 char name[32];
348 int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
349 int i = 0;
350
351 efi_debugfs = debugfs_create_dir("efi", NULL);
352 if (IS_ERR_OR_NULL(efi_debugfs))
353 return;
354
355 for_each_efi_memory_desc(md) {
356 switch (md->type) {
357 case EFI_BOOT_SERVICES_CODE:
358 snprintf(name, sizeof(name), "boot_services_code%d",
359 type_count[md->type]++);
360 break;
361 case EFI_BOOT_SERVICES_DATA:
362 snprintf(name, sizeof(name), "boot_services_data%d",
363 type_count[md->type]++);
364 break;
365 default:
366 continue;
367 }
368
369 if (i >= EFI_DEBUGFS_MAX_BLOBS) {
370			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
371 EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
372 break;
373 }
374
375 debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
376 debugfs_blob[i].data = memremap(md->phys_addr,
377 debugfs_blob[i].size,
378 MEMREMAP_WB);
379 if (!debugfs_blob[i].data)
380 continue;
381
382 debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
383 i++;
384 }
385}
386#else
387static inline void efi_debugfs_init(void) {}
388#endif
389
390/*
391 * We register the efi subsystem with the firmware subsystem and the
392 * efivars subsystem with the efi subsystem, if the system was booted with
393 * EFI.
394 */
395static int __init efisubsys_init(void)
396{
397 int error;
398
399 if (!efi_enabled(EFI_RUNTIME_SERVICES))
400 efi.runtime_supported_mask = 0;
401
402 if (!efi_enabled(EFI_BOOT))
403 return 0;
404
405 if (efi.runtime_supported_mask) {
406 /*
407 * Since we process only one efi_runtime_service() at a time, an
408 * ordered workqueue (which creates only one execution context)
409 * should suffice for all our needs.
410 */
411 efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
412 if (!efi_rts_wq) {
413 pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
414 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
415 efi.runtime_supported_mask = 0;
416 return 0;
417 }
418 }
419
420 if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
421 platform_device_register_simple("rtc-efi", 0, NULL, 0);
422
423 /* We register the efi directory at /sys/firmware/efi */
424 efi_kobj = kobject_create_and_add("efi", firmware_kobj);
425 if (!efi_kobj) {
426 pr_err("efi: Firmware registration failed.\n");
427 error = -ENOMEM;
428 goto err_destroy_wq;
429 }
430
431 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
432 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
433 error = generic_ops_register();
434 if (error)
435 goto err_put;
436 efivar_ssdt_load();
437 platform_device_register_simple("efivars", 0, NULL, 0);
438 }
439
440 BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);
441
442 error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
443 if (error) {
444 pr_err("efi: Sysfs attribute export failed with error %d.\n",
445 error);
446 goto err_unregister;
447 }
448
449 /* and the standard mountpoint for efivarfs */
450 error = sysfs_create_mount_point(efi_kobj, "efivars");
451 if (error) {
452 pr_err("efivars: Subsystem registration failed.\n");
453 goto err_remove_group;
454 }
455
456 if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
457 efi_debugfs_init();
458
459#ifdef CONFIG_EFI_COCO_SECRET
460 if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
461 platform_device_register_simple("efi_secret", 0, NULL, 0);
462#endif
463
464 return 0;
465
466err_remove_group:
467 sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
468err_unregister:
469 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
470 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
471 generic_ops_unregister();
472err_put:
473 kobject_put(efi_kobj);
474 efi_kobj = NULL;
475err_destroy_wq:
476 if (efi_rts_wq)
477 destroy_workqueue(efi_rts_wq);
478
479 return error;
480}
481
482subsys_initcall(efisubsys_init);
483
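/*
 * Scan the EFI memory map for ranges carrying the
 * EFI_MEMORY_MORE_RELIABLE attribute and mark them as mirrored in
 * memblock, so the kernel can prefer mirrored memory for its own
 * allocations.
 */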
484void __init efi_find_mirror(void)
485{
486 efi_memory_desc_t *md;
487 u64 mirror_size = 0, total_size = 0;
488
489 if (!efi_enabled(EFI_MEMMAP))
490 return;
491
492 for_each_efi_memory_desc(md) {
493 unsigned long long start = md->phys_addr;
494 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
495
496 total_size += size;
497 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
498 memblock_mark_mirror(start, size);
499 mirror_size += size;
500 }
501 }
502 if (mirror_size)
503 pr_info("Memory: %lldM/%lldM mirrored memory\n",
504 mirror_size>>20, total_size>>20);
505}
506
507/*
508 * Find the efi memory descriptor for a given physical address. Given a
509 * physical address, determine if it exists within an EFI Memory Map entry,
510 * and if so, populate the supplied memory descriptor with the appropriate
511 * data.
512 */
513int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
514{
515 efi_memory_desc_t *md;
516
517 if (!efi_enabled(EFI_MEMMAP)) {
518 pr_err_once("EFI_MEMMAP is not enabled.\n");
519 return -EINVAL;
520 }
521
522 if (!out_md) {
523 pr_err_once("out_md is null.\n");
524 return -EINVAL;
525 }
526
527 for_each_efi_memory_desc(md) {
528 u64 size;
529 u64 end;
530
531 /* skip bogus entries (including empty ones) */
532 if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
533 (md->num_pages <= 0) ||
534 (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
535 continue;
536
537 size = md->num_pages << EFI_PAGE_SHIFT;
538 end = md->phys_addr + size;
539 if (phys_addr >= md->phys_addr && phys_addr < end) {
540 memcpy(out_md, md, sizeof(*out_md));
541 return 0;
542 }
543 }
544 return -ENOENT;
545}
546
547extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
548 __weak __alias(__efi_mem_desc_lookup);
549
550/*
551 * Calculate the highest address of an efi memory descriptor.
552 */
553u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
554{
555 u64 size = md->num_pages << EFI_PAGE_SHIFT;
556 u64 end = md->phys_addr + size;
557 return end;
558}
559
560void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
561
562/**
563 * efi_mem_reserve - Reserve an EFI memory region
564 * @addr: Physical address to reserve
565 * @size: Size of reservation
566 *
567 * Mark a region as reserved from general kernel allocation and
568 * prevent it being released by efi_free_boot_services().
569 *
570 * This function should be called by drivers once they've parsed EFI
571 * configuration tables to figure out where their data lives, e.g.
572 * efi_esrt_init().
573 */
574void __init efi_mem_reserve(phys_addr_t addr, u64 size)
575{
576 /* efi_mem_reserve() does not work under Xen */
577 if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
578 return;
579
580 if (!memblock_is_region_reserved(addr, size))
581 memblock_reserve(addr, size);
582
583 /*
584 * Some architectures (x86) reserve all boot services ranges
585 * until efi_free_boot_services() because of buggy firmware
586 * implementations. This means the above memblock_reserve() is
587 * superfluous on x86; what it actually needs to do there is ensure
588 * that the (@addr, @size) range is not freed.
589 */
590 efi_arch_mem_reserve(addr, size);
591}
592
593static const efi_config_table_type_t common_tables[] __initconst = {
594 {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
595 {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
596 {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
597 {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
598 {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
599 {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
600 {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
601 {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
602 {EFI_TCG2_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "TPMFinalLog" },
603 {EFI_CC_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "CCFinalLog" },
604 {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
605 {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
606 {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
607#ifdef CONFIG_EFI_RCI2_TABLE
608 {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
609#endif
610#ifdef CONFIG_LOAD_UEFI_KEYS
611 {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
612#endif
613#ifdef CONFIG_EFI_COCO_SECRET
614 {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
615#endif
616#ifdef CONFIG_UNACCEPTED_MEMORY
617 {LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" },
618#endif
619#ifdef CONFIG_EFI_GENERIC_STUB
620 {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
621#endif
622 {},
623};
624
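/*
 * Compare one firmware configuration table GUID against a NULL_GUID
 * terminated list of known tables.  On a match, record the table address
 * in the corresponding pointer and append "NAME=0x..." to the pr_info()
 * line started by efi_config_parse_tables(); tables that fail the
 * usability check are only reported, not recorded.  Returns 1 on a
 * match, 0 otherwise.
 */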
625static __init int match_config_table(const efi_guid_t *guid,
626 unsigned long table,
627 const efi_config_table_type_t *table_types)
628{
629 int i;
630
631 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
632 if (efi_guidcmp(*guid, table_types[i].guid))
633 continue;
634
635 if (!efi_config_table_is_usable(guid, table)) {
636 if (table_types[i].name[0])
637 pr_cont("(%s=0x%lx unusable) ",
638 table_types[i].name, table);
639 return 1;
640 }
641
642 *(table_types[i].ptr) = table;
643 if (table_types[i].name[0])
644 pr_cont("%s=0x%lx ", table_types[i].name, table);
645 return 1;
646 }
647
648 return 0;
649}
650
651/**
652 * reserve_unaccepted - Map and reserve unaccepted configuration table
653 * @unaccepted: Pointer to unaccepted memory table
654 *
655 * memblock_add() makes sure that the table is mapped in direct mapping. During
656 * normal boot it happens automatically because the table is allocated from
657 * usable memory. But during crashkernel boot only memory specifically reserved
658 * for the crash scenario is mapped. memblock_add() forces the table to be mapped
659 * in crashkernel case.
660 *
661 * Align the range to the nearest page borders. Ranges smaller than page size
662 * are not going to be mapped.
663 *
664 * memblock_reserve() makes sure that future allocations will not touch the
665 * table.
666 */
667
668static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
669{
670 phys_addr_t start, size;
671
672 start = PAGE_ALIGN_DOWN(efi.unaccepted);
673 size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
674
675 memblock_add(start, size);
676 memblock_reserve(start, size);
677}
678
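/*
 * Walk the firmware's configuration table array (using the 32-bit or
 * 64-bit layout on x86, the native layout elsewhere), record the tables
 * we know about, and then consume the Linux-specific ones: credit the
 * bootloader-provided RNG seed, reserve the ranges listed in the
 * LINUX_EFI_MEMRESERVE table, narrow runtime_supported_mask from the RT
 * properties table, pick up the initrd location, and reserve the
 * unaccepted memory table.
 */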
679int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
680 int count,
681 const efi_config_table_type_t *arch_tables)
682{
683 const efi_config_table_64_t *tbl64 = (void *)config_tables;
684 const efi_config_table_32_t *tbl32 = (void *)config_tables;
685 const efi_guid_t *guid;
686 unsigned long table;
687 int i;
688
689 pr_info("");
690 for (i = 0; i < count; i++) {
691 if (!IS_ENABLED(CONFIG_X86)) {
692 guid = &config_tables[i].guid;
693 table = (unsigned long)config_tables[i].table;
694 } else if (efi_enabled(EFI_64BIT)) {
695 guid = &tbl64[i].guid;
696 table = tbl64[i].table;
697
698 if (IS_ENABLED(CONFIG_X86_32) &&
699 tbl64[i].table > U32_MAX) {
700 pr_cont("\n");
701 pr_err("Table located above 4GB, disabling EFI.\n");
702 return -EINVAL;
703 }
704 } else {
705 guid = &tbl32[i].guid;
706 table = tbl32[i].table;
707 }
708
709 if (!match_config_table(guid, table, common_tables) && arch_tables)
710 match_config_table(guid, table, arch_tables);
711 }
712 pr_cont("\n");
713 set_bit(EFI_CONFIG_TABLES, &efi.flags);
714
715 if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
716 struct linux_efi_random_seed *seed;
717 u32 size = 0;
718
719 seed = early_memremap(efi_rng_seed, sizeof(*seed));
720 if (seed != NULL) {
721 size = min_t(u32, seed->size, SZ_1K); // sanity check
722 early_memunmap(seed, sizeof(*seed));
723 } else {
724 pr_err("Could not map UEFI random seed!\n");
725 }
726 if (size > 0) {
727 seed = early_memremap(efi_rng_seed,
728 sizeof(*seed) + size);
729 if (seed != NULL) {
730 add_bootloader_randomness(seed->bits, size);
731 memzero_explicit(seed->bits, size);
732 early_memunmap(seed, sizeof(*seed) + size);
733 } else {
734 pr_err("Could not map UEFI random seed!\n");
735 }
736 }
737 }
738
739 if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
740 efi_memattr_init();
741
742 efi_tpm_eventlog_init();
743
744 if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
745 unsigned long prsv = mem_reserve;
746
747 while (prsv) {
748 struct linux_efi_memreserve *rsv;
749 u8 *p;
750
751 /*
752 * Just map a full page: that is what we will get
753 * anyway, and it permits us to map the entire entry
754 * before knowing its size.
755 */
756 p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
757 PAGE_SIZE);
758 if (p == NULL) {
759 pr_err("Could not map UEFI memreserve entry!\n");
760 return -ENOMEM;
761 }
762
763 rsv = (void *)(p + prsv % PAGE_SIZE);
764
765 /* reserve the entry itself */
766 memblock_reserve(prsv,
767 struct_size(rsv, entry, rsv->size));
768
769 for (i = 0; i < atomic_read(&rsv->count); i++) {
770 memblock_reserve(rsv->entry[i].base,
771 rsv->entry[i].size);
772 }
773
774 prsv = rsv->next;
775 early_memunmap(p, PAGE_SIZE);
776 }
777 }
778
779 if (rt_prop != EFI_INVALID_TABLE_ADDR) {
780 efi_rt_properties_table_t *tbl;
781
782 tbl = early_memremap(rt_prop, sizeof(*tbl));
783 if (tbl) {
784 efi.runtime_supported_mask &= tbl->runtime_services_supported;
785 early_memunmap(tbl, sizeof(*tbl));
786 }
787 }
788
789 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
790 initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
791 struct linux_efi_initrd *tbl;
792
793 tbl = early_memremap(initrd, sizeof(*tbl));
794 if (tbl) {
795 phys_initrd_start = tbl->base;
796 phys_initrd_size = tbl->size;
797 early_memunmap(tbl, sizeof(*tbl));
798 }
799 }
800
801 if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
802 efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
803 struct efi_unaccepted_memory *unaccepted;
804
805 unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
806 if (unaccepted) {
807
808 if (unaccepted->version == 1) {
809 reserve_unaccepted(unaccepted);
810 } else {
811 efi.unaccepted = EFI_INVALID_TABLE_ADDR;
812 }
813
814 early_memunmap(unaccepted, sizeof(*unaccepted));
815 }
816 }
817
818 return 0;
819}
820
821int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
822{
823 if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
824 pr_err("System table signature incorrect!\n");
825 return -EINVAL;
826 }
827
828 return 0;
829}
830
831static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
832 size_t size)
833{
834 const efi_char16_t *ret;
835
836 ret = early_memremap_ro(fw_vendor, size);
837 if (!ret)
838 pr_err("Could not map the firmware vendor!\n");
839 return ret;
840}
841
842static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
843{
844 early_memunmap((void *)fw_vendor, size);
845}
846
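/*
 * Log the system table revision and firmware vendor, e.g.
 * "efi: EFI v2.7 by EDK II" (vendor string illustrative).  The UCS-2
 * vendor string is copied to ASCII by truncating each character.  Some
 * x86-64 Apple Macs report a newer revision than their runtime services
 * can handle, so clamp the runtime revision to EFI 1.10 there.
 */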
847void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
848 unsigned long fw_vendor)
849{
850 char vendor[100] = "unknown";
851 const efi_char16_t *c16;
852 size_t i;
853 u16 rev;
854
855 c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
856 if (c16) {
857 for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
858 vendor[i] = c16[i];
859 vendor[i] = '\0';
860
861 unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
862 }
863
864 rev = (u16)systab_hdr->revision;
865 pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
866
867 rev %= 10;
868 if (rev)
869 pr_cont(".%u", rev);
870
871 pr_cont(" by %s\n", vendor);
872
873 if (IS_ENABLED(CONFIG_X86_64) &&
874 systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
875 !strcmp(vendor, "Apple")) {
876 pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
877 efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
878 }
879}
880
881static __initdata char memory_type_name[][13] = {
882 "Reserved",
883 "Loader Code",
884 "Loader Data",
885 "Boot Code",
886 "Boot Data",
887 "Runtime Code",
888 "Runtime Data",
889 "Conventional",
890 "Unusable",
891 "ACPI Reclaim",
892 "ACPI Mem NVS",
893 "MMIO",
894 "MMIO Port",
895 "PAL Code",
896 "Persistent",
897 "Unaccepted",
898};
899
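/*
 * Format one memory descriptor's type and attribute bits into @buf for
 * the memory map dumps, e.g. (illustrative):
 *   "[Runtime Data  |RUN| ... |WB|WT|WC|UC]"
 * Attribute bits outside the known set cause the raw attribute mask to
 * be printed instead.
 */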
900char * __init efi_md_typeattr_format(char *buf, size_t size,
901 const efi_memory_desc_t *md)
902{
903 char *pos;
904 int type_len;
905 u64 attr;
906
907 pos = buf;
908 if (md->type >= ARRAY_SIZE(memory_type_name))
909 type_len = snprintf(pos, size, "[type=%u", md->type);
910 else
911 type_len = snprintf(pos, size, "[%-*s",
912 (int)(sizeof(memory_type_name[0]) - 1),
913 memory_type_name[md->type]);
914 if (type_len >= size)
915 return buf;
916
917 pos += type_len;
918 size -= type_len;
919
920 attr = md->attribute;
921 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
922 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
923 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
924 EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
925 EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
926 snprintf(pos, size, "|attr=0x%016llx]",
927 (unsigned long long)attr);
928 else
929 snprintf(pos, size,
930 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
931 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
932 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
933 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
934 attr & EFI_MEMORY_SP ? "SP" : "",
935 attr & EFI_MEMORY_NV ? "NV" : "",
936 attr & EFI_MEMORY_XP ? "XP" : "",
937 attr & EFI_MEMORY_RP ? "RP" : "",
938 attr & EFI_MEMORY_WP ? "WP" : "",
939 attr & EFI_MEMORY_RO ? "RO" : "",
940 attr & EFI_MEMORY_UCE ? "UCE" : "",
941 attr & EFI_MEMORY_WB ? "WB" : "",
942 attr & EFI_MEMORY_WT ? "WT" : "",
943 attr & EFI_MEMORY_WC ? "WC" : "",
944 attr & EFI_MEMORY_UC ? "UC" : "");
945 return buf;
946}
947
948/*
949 * efi_mem_attributes - lookup memmap attributes for physical address
950 * @phys_addr: the physical address to lookup
951 *
952 * Search in the EFI memory map for the region covering
953 * @phys_addr. Returns the EFI memory attributes if the region
954 * was found in the memory map, 0 otherwise.
955 */
956u64 efi_mem_attributes(unsigned long phys_addr)
957{
958 efi_memory_desc_t *md;
959
960 if (!efi_enabled(EFI_MEMMAP))
961 return 0;
962
963 for_each_efi_memory_desc(md) {
964 if ((md->phys_addr <= phys_addr) &&
965 (phys_addr < (md->phys_addr +
966 (md->num_pages << EFI_PAGE_SHIFT))))
967 return md->attribute;
968 }
969 return 0;
970}
971
972/*
973 * efi_mem_type - lookup memmap type for physical address
974 * @phys_addr: the physical address to lookup
975 *
976 * Search in the EFI memory map for the region covering @phys_addr.
977 * Returns the EFI memory type if the region was found in the memory
978 * map, -EINVAL otherwise.
979 */
980int efi_mem_type(unsigned long phys_addr)
981{
982 const efi_memory_desc_t *md;
983
984 if (!efi_enabled(EFI_MEMMAP))
985 return -ENOTSUPP;
986
987 for_each_efi_memory_desc(md) {
988 if ((md->phys_addr <= phys_addr) &&
989 (phys_addr < (md->phys_addr +
990 (md->num_pages << EFI_PAGE_SHIFT))))
991 return md->type;
992 }
993 return -EINVAL;
994}
995
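/*
 * Translate an EFI status code into a negative errno value; anything we
 * do not recognise maps to -EINVAL.
 */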
996int efi_status_to_err(efi_status_t status)
997{
998 int err;
999
1000 switch (status) {
1001 case EFI_SUCCESS:
1002 err = 0;
1003 break;
1004 case EFI_INVALID_PARAMETER:
1005 err = -EINVAL;
1006 break;
1007 case EFI_OUT_OF_RESOURCES:
1008 err = -ENOSPC;
1009 break;
1010 case EFI_DEVICE_ERROR:
1011 err = -EIO;
1012 break;
1013 case EFI_WRITE_PROTECTED:
1014 err = -EROFS;
1015 break;
1016 case EFI_SECURITY_VIOLATION:
1017 err = -EACCES;
1018 break;
1019 case EFI_NOT_FOUND:
1020 err = -ENOENT;
1021 break;
1022 case EFI_ABORTED:
1023 err = -EINTR;
1024 break;
1025 default:
1026 err = -EINVAL;
1027 }
1028
1029 return err;
1030}
1031EXPORT_SYMBOL_GPL(efi_status_to_err);
1032
1033static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
1034static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1035
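/*
 * The LINUX_EFI_MEMRESERVE table is a linked list of reservations that
 * is passed from kernel to kernel across kexec, so that memory which
 * must stay untouched (e.g. tables still in use by hardware) remains
 * reserved in the next kernel.  Map the list head so that
 * efi_mem_reserve_persistent() can append entries at runtime.
 */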
1036static int __init efi_memreserve_map_root(void)
1037{
1038 if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1039 return -ENODEV;
1040
1041 efi_memreserve_root = memremap(mem_reserve,
1042 sizeof(*efi_memreserve_root),
1043 MEMREMAP_WB);
1044 if (WARN_ON_ONCE(!efi_memreserve_root))
1045 return -ENOMEM;
1046 return 0;
1047}
1048
1049static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1050{
1051 struct resource *res, *parent;
1052 int ret;
1053
1054 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1055 if (!res)
1056 return -ENOMEM;
1057
1058 res->name = "reserved";
1059 res->flags = IORESOURCE_MEM;
1060 res->start = addr;
1061 res->end = addr + size - 1;
1062
1063 /* we expect a conflict with a 'System RAM' region */
1064 parent = request_resource_conflict(&iomem_resource, res);
1065 ret = parent ? request_resource(parent, res) : 0;
1066
1067 /*
1068 * Given that efi_mem_reserve_iomem() can be called at any
1069 * time, only call memblock_reserve() if the architecture
1070 * keeps the infrastructure around.
1071 */
1072 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1073 memblock_reserve(addr, size);
1074
1075 return ret;
1076}
1077
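/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE list so that it
 * survives kexec, and mirror it into the iomem resource tree.  A driver
 * whose hardware keeps using a region across kernels might do something
 * like (illustrative only):
 *
 *	if (efi_mem_reserve_persistent(phys, size))
 *		pr_warn("reservation will not survive kexec\n");
 *
 * Entries are added to existing list nodes without taking the lock; the
 * spinlock is only needed when a new page has to be linked into the
 * list.
 */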
1078int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1079{
1080 struct linux_efi_memreserve *rsv;
1081 unsigned long prsv;
1082 int rc, index;
1083
1084 if (efi_memreserve_root == (void *)ULONG_MAX)
1085 return -ENODEV;
1086
1087 if (!efi_memreserve_root) {
1088 rc = efi_memreserve_map_root();
1089 if (rc)
1090 return rc;
1091 }
1092
1093 /* first try to find a slot in an existing linked list entry */
1094 for (prsv = efi_memreserve_root->next; prsv; ) {
1095 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1096 if (!rsv)
1097 return -ENOMEM;
1098 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1099 if (index < rsv->size) {
1100 rsv->entry[index].base = addr;
1101 rsv->entry[index].size = size;
1102
1103 memunmap(rsv);
1104 return efi_mem_reserve_iomem(addr, size);
1105 }
1106 prsv = rsv->next;
1107 memunmap(rsv);
1108 }
1109
1110 /* no slot found - allocate a new linked list entry */
1111 rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1112 if (!rsv)
1113 return -ENOMEM;
1114
1115 rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1116 if (rc) {
1117 free_page((unsigned long)rsv);
1118 return rc;
1119 }
1120
1121 /*
1122 * The memremap() call above assumes that a linux_efi_memreserve entry
1123 * never crosses a page boundary, so let's ensure that this remains true
1124 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1125 * using SZ_4K explicitly in the size calculation below.
1126 */
1127 rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1128 atomic_set(&rsv->count, 1);
1129 rsv->entry[0].base = addr;
1130 rsv->entry[0].size = size;
1131
1132 spin_lock(&efi_mem_reserve_persistent_lock);
1133 rsv->next = efi_memreserve_root->next;
1134 efi_memreserve_root->next = __pa(rsv);
1135 spin_unlock(&efi_mem_reserve_persistent_lock);
1136
1137 return efi_mem_reserve_iomem(addr, size);
1138}
1139
1140static int __init efi_memreserve_root_init(void)
1141{
1142 if (efi_memreserve_root)
1143 return 0;
1144 if (efi_memreserve_map_root())
1145 efi_memreserve_root = (void *)ULONG_MAX;
1146 return 0;
1147}
1148early_initcall(efi_memreserve_root_init);
1149
1150#ifdef CONFIG_KEXEC
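/*
 * Reboot notifier: when a kexec is in progress, overwrite the EFI RNG
 * seed table with fresh random bytes so that the next kernel does not
 * reuse entropy that has already been credited to this one.
 */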
1151static int update_efi_random_seed(struct notifier_block *nb,
1152 unsigned long code, void *unused)
1153{
1154 struct linux_efi_random_seed *seed;
1155 u32 size = 0;
1156
1157 if (!kexec_in_progress)
1158 return NOTIFY_DONE;
1159
1160 seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1161 if (seed != NULL) {
1162 size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1163 memunmap(seed);
1164 } else {
1165 pr_err("Could not map UEFI random seed!\n");
1166 }
1167 if (size > 0) {
1168 seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1169 MEMREMAP_WB);
1170 if (seed != NULL) {
1171 seed->size = size;
1172 get_random_bytes(seed->bits, seed->size);
1173 memunmap(seed);
1174 } else {
1175 pr_err("Could not map UEFI random seed!\n");
1176 }
1177 }
1178 return NOTIFY_DONE;
1179}
1180
1181static struct notifier_block efi_random_seed_nb = {
1182 .notifier_call = update_efi_random_seed,
1183};
1184
1185static int __init register_update_efi_random_seed(void)
1186{
1187 if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1188 return 0;
1189 return register_reboot_notifier(&efi_random_seed_nb);
1190}
1191late_initcall(register_update_efi_random_seed);
1192#endif
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * efi.c - EFI subsystem
4 *
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8 *
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
11 * The existance of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kobject.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/debugfs.h>
21#include <linux/device.h>
22#include <linux/efi.h>
23#include <linux/of.h>
24#include <linux/initrd.h>
25#include <linux/io.h>
26#include <linux/kexec.h>
27#include <linux/platform_device.h>
28#include <linux/random.h>
29#include <linux/reboot.h>
30#include <linux/slab.h>
31#include <linux/acpi.h>
32#include <linux/ucs2_string.h>
33#include <linux/memblock.h>
34#include <linux/security.h>
35
36#include <asm/early_ioremap.h>
37
38struct efi __read_mostly efi = {
39 .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
40 .acpi = EFI_INVALID_TABLE_ADDR,
41 .acpi20 = EFI_INVALID_TABLE_ADDR,
42 .smbios = EFI_INVALID_TABLE_ADDR,
43 .smbios3 = EFI_INVALID_TABLE_ADDR,
44 .esrt = EFI_INVALID_TABLE_ADDR,
45 .tpm_log = EFI_INVALID_TABLE_ADDR,
46 .tpm_final_log = EFI_INVALID_TABLE_ADDR,
47#ifdef CONFIG_LOAD_UEFI_KEYS
48 .mokvar_table = EFI_INVALID_TABLE_ADDR,
49#endif
50#ifdef CONFIG_EFI_COCO_SECRET
51 .coco_secret = EFI_INVALID_TABLE_ADDR,
52#endif
53};
54EXPORT_SYMBOL(efi);
55
56unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
57static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
58static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
59static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
60
61extern unsigned long screen_info_table;
62
63struct mm_struct efi_mm = {
64 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
65 .mm_users = ATOMIC_INIT(2),
66 .mm_count = ATOMIC_INIT(1),
67 .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
68 MMAP_LOCK_INITIALIZER(efi_mm)
69 .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
70 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
71 .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
72};
73
74struct workqueue_struct *efi_rts_wq;
75
76static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
77static int __init setup_noefi(char *arg)
78{
79 disable_runtime = true;
80 return 0;
81}
82early_param("noefi", setup_noefi);
83
84bool efi_runtime_disabled(void)
85{
86 return disable_runtime;
87}
88
89bool __pure __efi_soft_reserve_enabled(void)
90{
91 return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
92}
93
94static int __init parse_efi_cmdline(char *str)
95{
96 if (!str) {
97 pr_warn("need at least one option\n");
98 return -EINVAL;
99 }
100
101 if (parse_option_str(str, "debug"))
102 set_bit(EFI_DBG, &efi.flags);
103
104 if (parse_option_str(str, "noruntime"))
105 disable_runtime = true;
106
107 if (parse_option_str(str, "runtime"))
108 disable_runtime = false;
109
110 if (parse_option_str(str, "nosoftreserve"))
111 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
112
113 return 0;
114}
115early_param("efi", parse_efi_cmdline);
116
117struct kobject *efi_kobj;
118
119/*
120 * Let's not leave out systab information that snuck into
121 * the efivars driver
122 * Note, do not add more fields in systab sysfs file as it breaks sysfs
123 * one value per file rule!
124 */
125static ssize_t systab_show(struct kobject *kobj,
126 struct kobj_attribute *attr, char *buf)
127{
128 char *str = buf;
129
130 if (!kobj || !buf)
131 return -EINVAL;
132
133 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
134 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
135 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
136 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
137 /*
138 * If both SMBIOS and SMBIOS3 entry points are implemented, the
139 * SMBIOS3 entry point shall be preferred, so we list it first to
140 * let applications stop parsing after the first match.
141 */
142 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
143 str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
144 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
145 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
146
147 if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
148 str = efi_systab_show_arch(str);
149
150 return str - buf;
151}
152
153static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
154
155static ssize_t fw_platform_size_show(struct kobject *kobj,
156 struct kobj_attribute *attr, char *buf)
157{
158 return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
159}
160
161extern __weak struct kobj_attribute efi_attr_fw_vendor;
162extern __weak struct kobj_attribute efi_attr_runtime;
163extern __weak struct kobj_attribute efi_attr_config_table;
164static struct kobj_attribute efi_attr_fw_platform_size =
165 __ATTR_RO(fw_platform_size);
166
167static struct attribute *efi_subsys_attrs[] = {
168 &efi_attr_systab.attr,
169 &efi_attr_fw_platform_size.attr,
170 &efi_attr_fw_vendor.attr,
171 &efi_attr_runtime.attr,
172 &efi_attr_config_table.attr,
173 NULL,
174};
175
176umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
177 int n)
178{
179 return attr->mode;
180}
181
182static const struct attribute_group efi_subsys_attr_group = {
183 .attrs = efi_subsys_attrs,
184 .is_visible = efi_attr_is_visible,
185};
186
187static struct efivars generic_efivars;
188static struct efivar_operations generic_ops;
189
190static int generic_ops_register(void)
191{
192 generic_ops.get_variable = efi.get_variable;
193 generic_ops.get_next_variable = efi.get_next_variable;
194 generic_ops.query_variable_store = efi_query_variable_store;
195
196 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
197 generic_ops.set_variable = efi.set_variable;
198 generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
199 }
200 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
201}
202
203static void generic_ops_unregister(void)
204{
205 efivars_unregister(&generic_efivars);
206}
207
208#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
209#define EFIVAR_SSDT_NAME_MAX 16UL
210static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
211static int __init efivar_ssdt_setup(char *str)
212{
213 int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
214
215 if (ret)
216 return ret;
217
218 if (strlen(str) < sizeof(efivar_ssdt))
219 memcpy(efivar_ssdt, str, strlen(str));
220 else
221 pr_warn("efivar_ssdt: name too long: %s\n", str);
222 return 1;
223}
224__setup("efivar_ssdt=", efivar_ssdt_setup);
225
226static __init int efivar_ssdt_load(void)
227{
228 unsigned long name_size = 256;
229 efi_char16_t *name = NULL;
230 efi_status_t status;
231 efi_guid_t guid;
232
233 if (!efivar_ssdt[0])
234 return 0;
235
236 name = kzalloc(name_size, GFP_KERNEL);
237 if (!name)
238 return -ENOMEM;
239
240 for (;;) {
241 char utf8_name[EFIVAR_SSDT_NAME_MAX];
242 unsigned long data_size = 0;
243 void *data;
244 int limit;
245
246 status = efi.get_next_variable(&name_size, name, &guid);
247 if (status == EFI_NOT_FOUND) {
248 break;
249 } else if (status == EFI_BUFFER_TOO_SMALL) {
250 name = krealloc(name, name_size, GFP_KERNEL);
251 if (!name)
252 return -ENOMEM;
253 continue;
254 }
255
256 limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
257 ucs2_as_utf8(utf8_name, name, limit - 1);
258 if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
259 continue;
260
261 pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
262
263 status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
264 if (status != EFI_BUFFER_TOO_SMALL || !data_size)
265 return -EIO;
266
267 data = kmalloc(data_size, GFP_KERNEL);
268 if (!data)
269 return -ENOMEM;
270
271 status = efi.get_variable(name, &guid, NULL, &data_size, data);
272 if (status == EFI_SUCCESS) {
273 acpi_status ret = acpi_load_table(data, NULL);
274 if (ret)
275 pr_err("failed to load table: %u\n", ret);
276 else
277 continue;
278 } else {
279 pr_err("failed to get var data: 0x%lx\n", status);
280 }
281 kfree(data);
282 }
283 return 0;
284}
285#else
286static inline int efivar_ssdt_load(void) { return 0; }
287#endif
288
289#ifdef CONFIG_DEBUG_FS
290
291#define EFI_DEBUGFS_MAX_BLOBS 32
292
293static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
294
295static void __init efi_debugfs_init(void)
296{
297 struct dentry *efi_debugfs;
298 efi_memory_desc_t *md;
299 char name[32];
300 int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
301 int i = 0;
302
303 efi_debugfs = debugfs_create_dir("efi", NULL);
304 if (IS_ERR_OR_NULL(efi_debugfs))
305 return;
306
307 for_each_efi_memory_desc(md) {
308 switch (md->type) {
309 case EFI_BOOT_SERVICES_CODE:
310 snprintf(name, sizeof(name), "boot_services_code%d",
311 type_count[md->type]++);
312 break;
313 case EFI_BOOT_SERVICES_DATA:
314 snprintf(name, sizeof(name), "boot_services_data%d",
315 type_count[md->type]++);
316 break;
317 default:
318 continue;
319 }
320
321 if (i >= EFI_DEBUGFS_MAX_BLOBS) {
322 pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
323 EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
324 break;
325 }
326
327 debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
328 debugfs_blob[i].data = memremap(md->phys_addr,
329 debugfs_blob[i].size,
330 MEMREMAP_WB);
331 if (!debugfs_blob[i].data)
332 continue;
333
334 debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
335 i++;
336 }
337}
338#else
339static inline void efi_debugfs_init(void) {}
340#endif
341
342static void refresh_nv_rng_seed(struct work_struct *work)
343{
344 u8 seed[EFI_RANDOM_SEED_SIZE];
345
346 get_random_bytes(seed, sizeof(seed));
347 efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID,
348 EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
349 EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed);
350 memzero_explicit(seed, sizeof(seed));
351}
352static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data)
353{
354 static DECLARE_WORK(work, refresh_nv_rng_seed);
355 schedule_work(&work);
356 return NOTIFY_DONE;
357}
358static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification };
359
360/*
361 * We register the efi subsystem with the firmware subsystem and the
362 * efivars subsystem with the efi subsystem, if the system was booted with
363 * EFI.
364 */
365static int __init efisubsys_init(void)
366{
367 int error;
368
369 if (!efi_enabled(EFI_RUNTIME_SERVICES))
370 efi.runtime_supported_mask = 0;
371
372 if (!efi_enabled(EFI_BOOT))
373 return 0;
374
375 if (efi.runtime_supported_mask) {
376 /*
377 * Since we process only one efi_runtime_service() at a time, an
378 * ordered workqueue (which creates only one execution context)
379 * should suffice for all our needs.
380 */
381 efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
382 if (!efi_rts_wq) {
383 pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
384 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
385 efi.runtime_supported_mask = 0;
386 return 0;
387 }
388 }
389
390 if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
391 platform_device_register_simple("rtc-efi", 0, NULL, 0);
392
393 /* We register the efi directory at /sys/firmware/efi */
394 efi_kobj = kobject_create_and_add("efi", firmware_kobj);
395 if (!efi_kobj) {
396 pr_err("efi: Firmware registration failed.\n");
397 error = -ENOMEM;
398 goto err_destroy_wq;
399 }
400
401 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
402 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
403 error = generic_ops_register();
404 if (error)
405 goto err_put;
406 efivar_ssdt_load();
407 platform_device_register_simple("efivars", 0, NULL, 0);
408 }
409
410 error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
411 if (error) {
412 pr_err("efi: Sysfs attribute export failed with error %d.\n",
413 error);
414 goto err_unregister;
415 }
416
417 /* and the standard mountpoint for efivarfs */
418 error = sysfs_create_mount_point(efi_kobj, "efivars");
419 if (error) {
420 pr_err("efivars: Subsystem registration failed.\n");
421 goto err_remove_group;
422 }
423
424 if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
425 efi_debugfs_init();
426
427#ifdef CONFIG_EFI_COCO_SECRET
428 if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
429 platform_device_register_simple("efi_secret", 0, NULL, 0);
430#endif
431
432 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
433 execute_with_initialized_rng(&refresh_nv_rng_seed_nb);
434
435 return 0;
436
437err_remove_group:
438 sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
439err_unregister:
440 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
441 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
442 generic_ops_unregister();
443err_put:
444 kobject_put(efi_kobj);
445 efi_kobj = NULL;
446err_destroy_wq:
447 if (efi_rts_wq)
448 destroy_workqueue(efi_rts_wq);
449
450 return error;
451}
452
453subsys_initcall(efisubsys_init);
454
455void __init efi_find_mirror(void)
456{
457 efi_memory_desc_t *md;
458 u64 mirror_size = 0, total_size = 0;
459
460 if (!efi_enabled(EFI_MEMMAP))
461 return;
462
463 for_each_efi_memory_desc(md) {
464 unsigned long long start = md->phys_addr;
465 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
466
467 total_size += size;
468 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
469 memblock_mark_mirror(start, size);
470 mirror_size += size;
471 }
472 }
473 if (mirror_size)
474 pr_info("Memory: %lldM/%lldM mirrored memory\n",
475 mirror_size>>20, total_size>>20);
476}
477
478/*
479 * Find the efi memory descriptor for a given physical address. Given a
480 * physical address, determine if it exists within an EFI Memory Map entry,
481 * and if so, populate the supplied memory descriptor with the appropriate
482 * data.
483 */
484int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
485{
486 efi_memory_desc_t *md;
487
488 if (!efi_enabled(EFI_MEMMAP)) {
489 pr_err_once("EFI_MEMMAP is not enabled.\n");
490 return -EINVAL;
491 }
492
493 if (!out_md) {
494 pr_err_once("out_md is null.\n");
495 return -EINVAL;
496 }
497
498 for_each_efi_memory_desc(md) {
499 u64 size;
500 u64 end;
501
502 size = md->num_pages << EFI_PAGE_SHIFT;
503 end = md->phys_addr + size;
504 if (phys_addr >= md->phys_addr && phys_addr < end) {
505 memcpy(out_md, md, sizeof(*out_md));
506 return 0;
507 }
508 }
509 return -ENOENT;
510}
511
512/*
513 * Calculate the highest address of an efi memory descriptor.
514 */
515u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
516{
517 u64 size = md->num_pages << EFI_PAGE_SHIFT;
518 u64 end = md->phys_addr + size;
519 return end;
520}
521
522void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
523
524/**
525 * efi_mem_reserve - Reserve an EFI memory region
526 * @addr: Physical address to reserve
527 * @size: Size of reservation
528 *
529 * Mark a region as reserved from general kernel allocation and
530 * prevent it being released by efi_free_boot_services().
531 *
532 * This function should be called drivers once they've parsed EFI
533 * configuration tables to figure out where their data lives, e.g.
534 * efi_esrt_init().
535 */
536void __init efi_mem_reserve(phys_addr_t addr, u64 size)
537{
538 if (!memblock_is_region_reserved(addr, size))
539 memblock_reserve(addr, size);
540
541 /*
542 * Some architectures (x86) reserve all boot services ranges
543 * until efi_free_boot_services() because of buggy firmware
544 * implementations. This means the above memblock_reserve() is
545 * superfluous on x86 and instead what it needs to do is
546 * ensure the @start, @size is not freed.
547 */
548 efi_arch_mem_reserve(addr, size);
549}
550
551static const efi_config_table_type_t common_tables[] __initconst = {
552 {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
553 {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
554 {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
555 {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
556 {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
557 {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
558 {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
559 {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
560 {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
561 {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
562 {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
563 {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
564#ifdef CONFIG_EFI_RCI2_TABLE
565 {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
566#endif
567#ifdef CONFIG_LOAD_UEFI_KEYS
568 {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
569#endif
570#ifdef CONFIG_EFI_COCO_SECRET
571 {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
572#endif
573#ifdef CONFIG_EFI_GENERIC_STUB
574 {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
575#endif
576 {},
577};
578
579static __init int match_config_table(const efi_guid_t *guid,
580 unsigned long table,
581 const efi_config_table_type_t *table_types)
582{
583 int i;
584
585 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
586 if (!efi_guidcmp(*guid, table_types[i].guid)) {
587 *(table_types[i].ptr) = table;
588 if (table_types[i].name[0])
589 pr_cont("%s=0x%lx ",
590 table_types[i].name, table);
591 return 1;
592 }
593 }
594
595 return 0;
596}
597
598int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
599 int count,
600 const efi_config_table_type_t *arch_tables)
601{
602 const efi_config_table_64_t *tbl64 = (void *)config_tables;
603 const efi_config_table_32_t *tbl32 = (void *)config_tables;
604 const efi_guid_t *guid;
605 unsigned long table;
606 int i;
607
608 pr_info("");
609 for (i = 0; i < count; i++) {
610 if (!IS_ENABLED(CONFIG_X86)) {
611 guid = &config_tables[i].guid;
612 table = (unsigned long)config_tables[i].table;
613 } else if (efi_enabled(EFI_64BIT)) {
614 guid = &tbl64[i].guid;
615 table = tbl64[i].table;
616
617 if (IS_ENABLED(CONFIG_X86_32) &&
618 tbl64[i].table > U32_MAX) {
619 pr_cont("\n");
620 pr_err("Table located above 4GB, disabling EFI.\n");
621 return -EINVAL;
622 }
623 } else {
624 guid = &tbl32[i].guid;
625 table = tbl32[i].table;
626 }
627
628 if (!match_config_table(guid, table, common_tables) && arch_tables)
629 match_config_table(guid, table, arch_tables);
630 }
631 pr_cont("\n");
632 set_bit(EFI_CONFIG_TABLES, &efi.flags);
633
634 if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
635 struct linux_efi_random_seed *seed;
636 u32 size = 0;
637
638 seed = early_memremap(efi_rng_seed, sizeof(*seed));
639 if (seed != NULL) {
640 size = min_t(u32, seed->size, SZ_1K); // sanity check
641 early_memunmap(seed, sizeof(*seed));
642 } else {
643 pr_err("Could not map UEFI random seed!\n");
644 }
645 if (size > 0) {
646 seed = early_memremap(efi_rng_seed,
647 sizeof(*seed) + size);
648 if (seed != NULL) {
649 add_bootloader_randomness(seed->bits, size);
650 memzero_explicit(seed->bits, size);
651 early_memunmap(seed, sizeof(*seed) + size);
652 } else {
653 pr_err("Could not map UEFI random seed!\n");
654 }
655 }
656 }
657
658 if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
659 efi_memattr_init();
660
661 efi_tpm_eventlog_init();
662
663 if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
664 unsigned long prsv = mem_reserve;
665
666 while (prsv) {
667 struct linux_efi_memreserve *rsv;
668 u8 *p;
669
670 /*
671 * Just map a full page: that is what we will get
672 * anyway, and it permits us to map the entire entry
673 * before knowing its size.
674 */
675 p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
676 PAGE_SIZE);
677 if (p == NULL) {
678 pr_err("Could not map UEFI memreserve entry!\n");
679 return -ENOMEM;
680 }
681
682 rsv = (void *)(p + prsv % PAGE_SIZE);
683
684 /* reserve the entry itself */
685 memblock_reserve(prsv,
686 struct_size(rsv, entry, rsv->size));
687
688 for (i = 0; i < atomic_read(&rsv->count); i++) {
689 memblock_reserve(rsv->entry[i].base,
690 rsv->entry[i].size);
691 }
692
693 prsv = rsv->next;
694 early_memunmap(p, PAGE_SIZE);
695 }
696 }
697
698 if (rt_prop != EFI_INVALID_TABLE_ADDR) {
699 efi_rt_properties_table_t *tbl;
700
701 tbl = early_memremap(rt_prop, sizeof(*tbl));
702 if (tbl) {
703 efi.runtime_supported_mask &= tbl->runtime_services_supported;
704 early_memunmap(tbl, sizeof(*tbl));
705 }
706 }
707
708 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
709 initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
710 struct linux_efi_initrd *tbl;
711
712 tbl = early_memremap(initrd, sizeof(*tbl));
713 if (tbl) {
714 phys_initrd_start = tbl->base;
715 phys_initrd_size = tbl->size;
716 early_memunmap(tbl, sizeof(*tbl));
717 }
718 }
719
720 return 0;
721}
722
723int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
724 int min_major_version)
725{
726 if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
727 pr_err("System table signature incorrect!\n");
728 return -EINVAL;
729 }
730
731 if ((systab_hdr->revision >> 16) < min_major_version)
732 pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
733 systab_hdr->revision >> 16,
734 systab_hdr->revision & 0xffff,
735 min_major_version);
736
737 return 0;
738}
739
740#ifndef CONFIG_IA64
741static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
742 size_t size)
743{
744 const efi_char16_t *ret;
745
746 ret = early_memremap_ro(fw_vendor, size);
747 if (!ret)
748 pr_err("Could not map the firmware vendor!\n");
749 return ret;
750}
751
752static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
753{
754 early_memunmap((void *)fw_vendor, size);
755}
756#else
757#define map_fw_vendor(p, s) __va(p)
758#define unmap_fw_vendor(v, s)
759#endif
760
761void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
762 unsigned long fw_vendor)
763{
764 char vendor[100] = "unknown";
765 const efi_char16_t *c16;
766 size_t i;
767
768 c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
769 if (c16) {
770 for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
771 vendor[i] = c16[i];
772 vendor[i] = '\0';
773
774 unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
775 }
776
777 pr_info("EFI v%u.%.02u by %s\n",
778 systab_hdr->revision >> 16,
779 systab_hdr->revision & 0xffff,
780 vendor);
781
782 if (IS_ENABLED(CONFIG_X86_64) &&
783 systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
784 !strcmp(vendor, "Apple")) {
785 pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
786 efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
787 }
788}
789
static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
};

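/*
 * efi_md_typeattr_format - format a memory descriptor's type and attributes
 * @buf:  destination buffer
 * @size: size of @buf in bytes
 *
 * Produces strings such as
 * "[Conventional|   |  |  |  |  |  |  |  |  |   |WB|WT|WC|UC]".
 * If the descriptor carries attribute bits this code does not know
 * about, the raw 64-bit attribute value is printed instead of the
 * per-flag columns.
 */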
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
			 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
			 attr & EFI_MEMORY_SP ? "SP" : "",
			 attr & EFI_MEMORY_NV ? "NV" : "",
			 attr & EFI_MEMORY_XP ? "XP" : "",
			 attr & EFI_MEMORY_RP ? "RP" : "",
			 attr & EFI_MEMORY_WP ? "WP" : "",
			 attr & EFI_MEMORY_RO ? "RO" : "",
			 attr & EFI_MEMORY_UCE ? "UCE" : "",
			 attr & EFI_MEMORY_WB ? "WB" : "",
			 attr & EFI_MEMORY_WT ? "WT" : "",
			 attr & EFI_MEMORY_WC ? "WC" : "",
			 attr & EFI_MEMORY_UC ? "UC" : "");
	return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -ENOTSUPP if no EFI memory map is available, or -EINVAL if no
 * region covers @phys_addr.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

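/*
 * efi_status_to_err - map an EFI_* status code onto a negative errno value
 *
 * Only the status codes that in-kernel callers actually care about are
 * translated; anything unrecognised collapses to -EINVAL.
 */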
int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

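/*
 * Persistent memory reservations are kept in a linked list of
 * struct linux_efi_memreserve entries whose head is published to the
 * kernel via an EFI configuration table (parsed into mem_reserve above),
 * so that they survive kexec.  efi_memreserve_root caches a permanent
 * mapping of the list head; (void *)ULONG_MAX is used as a sentinel
 * once mapping it has failed.
 */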
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

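/*
 * Register the reservation in the iomem resource tree as well, so it
 * shows up as "reserved" in /proc/iomem.  The region is expected to sit
 * inside an existing 'System RAM' resource, hence the conflict lookup
 * before inserting it as a child resource.
 */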
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

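/*
 * efi_mem_reserve_persistent - reserve a memory region across kexec
 * @addr: physical start address of the region
 * @size: size of the region in bytes
 *
 * Record the region in the memreserve linked list so that a kexec'd
 * kernel will reserve it again, and mark it reserved in the iomem tree
 * of the running kernel.  A free slot in an existing list entry is used
 * if one is available; otherwise a new page-sized entry is allocated
 * and linked in right behind the list head.
 */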
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
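/*
 * Before a kexec, refill the EFI random seed table with fresh entropy
 * from the running kernel, so that the next kernel does not reuse the
 * seed that was already consumed during this boot.
 */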
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif