1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * efi.c - EFI subsystem
4 *
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8 *
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
11 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kobject.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/debugfs.h>
21#include <linux/device.h>
22#include <linux/efi.h>
23#include <linux/of.h>
24#include <linux/initrd.h>
25#include <linux/io.h>
26#include <linux/kexec.h>
27#include <linux/platform_device.h>
28#include <linux/random.h>
29#include <linux/reboot.h>
30#include <linux/slab.h>
31#include <linux/acpi.h>
32#include <linux/ucs2_string.h>
33#include <linux/memblock.h>
34#include <linux/security.h>
35
36#include <asm/early_ioremap.h>
37
38struct efi __read_mostly efi = {
39 .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
40 .acpi = EFI_INVALID_TABLE_ADDR,
41 .acpi20 = EFI_INVALID_TABLE_ADDR,
42 .smbios = EFI_INVALID_TABLE_ADDR,
43 .smbios3 = EFI_INVALID_TABLE_ADDR,
44 .esrt = EFI_INVALID_TABLE_ADDR,
45 .tpm_log = EFI_INVALID_TABLE_ADDR,
46 .tpm_final_log = EFI_INVALID_TABLE_ADDR,
47#ifdef CONFIG_LOAD_UEFI_KEYS
48 .mokvar_table = EFI_INVALID_TABLE_ADDR,
49#endif
50#ifdef CONFIG_EFI_COCO_SECRET
51 .coco_secret = EFI_INVALID_TABLE_ADDR,
52#endif
53};
54EXPORT_SYMBOL(efi);
55
56unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
57static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
58static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
59static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
60
61extern unsigned long screen_info_table;
62
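/*
 * The mm_struct used for the EFI runtime mappings. Architectures that
 * install the runtime regions in a private set of page tables switch to
 * this mm around runtime service calls.
 */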
63struct mm_struct efi_mm = {
64 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
65 .mm_users = ATOMIC_INIT(2),
66 .mm_count = ATOMIC_INIT(1),
67 .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
68 MMAP_LOCK_INITIALIZER(efi_mm)
69 .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
70 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
71 .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
72};
73
74struct workqueue_struct *efi_rts_wq;
75
76static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
77static int __init setup_noefi(char *arg)
78{
79 disable_runtime = true;
80 return 0;
81}
82early_param("noefi", setup_noefi);
83
84bool efi_runtime_disabled(void)
85{
86 return disable_runtime;
87}
88
89bool __pure __efi_soft_reserve_enabled(void)
90{
91 return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
92}
93
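/*
 * Parse the "efi=" kernel command line option. Recognized values are
 * "debug", "noruntime", "runtime" and "nosoftreserve"; anything else is
 * silently ignored.
 */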
94static int __init parse_efi_cmdline(char *str)
95{
96 if (!str) {
97 pr_warn("need at least one option\n");
98 return -EINVAL;
99 }
100
101 if (parse_option_str(str, "debug"))
102 set_bit(EFI_DBG, &efi.flags);
103
104 if (parse_option_str(str, "noruntime"))
105 disable_runtime = true;
106
107 if (parse_option_str(str, "runtime"))
108 disable_runtime = false;
109
110 if (parse_option_str(str, "nosoftreserve"))
111 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
112
113 return 0;
114}
115early_param("efi", parse_efi_cmdline);
116
117struct kobject *efi_kobj;
118
119/*
120 * Let's not leave out the systab information that snuck into
121 * the efivars driver.
122 * Note: do not add more fields to the systab sysfs file, as that would
123 * break the sysfs one-value-per-file rule!
124 */
125static ssize_t systab_show(struct kobject *kobj,
126 struct kobj_attribute *attr, char *buf)
127{
128 char *str = buf;
129
130 if (!kobj || !buf)
131 return -EINVAL;
132
133 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
134 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
135 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
136 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
137 /*
138 * If both SMBIOS and SMBIOS3 entry points are implemented, the
139 * SMBIOS3 entry point shall be preferred, so we list it first to
140 * let applications stop parsing after the first match.
141 */
142 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
143 str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
144 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
145 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
146
147 if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
148 str = efi_systab_show_arch(str);
149
150 return str - buf;
151}
152
153static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
154
155static ssize_t fw_platform_size_show(struct kobject *kobj,
156 struct kobj_attribute *attr, char *buf)
157{
158 return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
159}
160
161extern __weak struct kobj_attribute efi_attr_fw_vendor;
162extern __weak struct kobj_attribute efi_attr_runtime;
163extern __weak struct kobj_attribute efi_attr_config_table;
164static struct kobj_attribute efi_attr_fw_platform_size =
165 __ATTR_RO(fw_platform_size);
166
167static struct attribute *efi_subsys_attrs[] = {
168 &efi_attr_systab.attr,
169 &efi_attr_fw_platform_size.attr,
170 &efi_attr_fw_vendor.attr,
171 &efi_attr_runtime.attr,
172 &efi_attr_config_table.attr,
173 NULL,
174};
175
176umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
177 int n)
178{
179 return attr->mode;
180}
181
182static const struct attribute_group efi_subsys_attr_group = {
183 .attrs = efi_subsys_attrs,
184 .is_visible = efi_attr_is_visible,
185};
186
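/*
 * The generic efivar operations simply forward to the firmware's runtime
 * variable services; they back the efivars interface that efivarfs is
 * mounted on top of.
 */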
187static struct efivars generic_efivars;
188static struct efivar_operations generic_ops;
189
190static int generic_ops_register(void)
191{
192 generic_ops.get_variable = efi.get_variable;
193 generic_ops.get_next_variable = efi.get_next_variable;
194 generic_ops.query_variable_store = efi_query_variable_store;
195
196 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
197 generic_ops.set_variable = efi.set_variable;
198 generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
199 }
200 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
201}
202
203static void generic_ops_unregister(void)
204{
205 efivars_unregister(&generic_efivars);
206}
207
208#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
209#define EFIVAR_SSDT_NAME_MAX 16UL
210static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
211static int __init efivar_ssdt_setup(char *str)
212{
213 int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
214
215 if (ret)
216 return ret;
217
218 if (strlen(str) < sizeof(efivar_ssdt))
219 memcpy(efivar_ssdt, str, strlen(str));
220 else
221 pr_warn("efivar_ssdt: name too long: %s\n", str);
222 return 1;
223}
224__setup("efivar_ssdt=", efivar_ssdt_setup);
225
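/*
 * Walk all EFI variables with GetNextVariableName() and load any variable
 * whose name matches the "efivar_ssdt=" parameter as an ACPI SSDT overlay.
 */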
226static __init int efivar_ssdt_load(void)
227{
228 unsigned long name_size = 256;
229 efi_char16_t *name = NULL;
230 efi_status_t status;
231 efi_guid_t guid;
232
233 if (!efivar_ssdt[0])
234 return 0;
235
236 name = kzalloc(name_size, GFP_KERNEL);
237 if (!name)
238 return -ENOMEM;
239
240 for (;;) {
241 char utf8_name[EFIVAR_SSDT_NAME_MAX];
242 unsigned long data_size = 0;
243 void *data;
244 int limit;
245
246 status = efi.get_next_variable(&name_size, name, &guid);
247 if (status == EFI_NOT_FOUND) {
248 break;
249 } else if (status == EFI_BUFFER_TOO_SMALL) {
250 name = krealloc(name, name_size, GFP_KERNEL);
251 if (!name)
252 return -ENOMEM;
253 continue;
254 }
255
256 limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
257 ucs2_as_utf8(utf8_name, name, limit - 1);
258 if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
259 continue;
260
261 pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
262
263 status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
264 if (status != EFI_BUFFER_TOO_SMALL || !data_size)
265 return -EIO;
266
267 data = kmalloc(data_size, GFP_KERNEL);
268 if (!data)
269 return -ENOMEM;
270
271 status = efi.get_variable(name, &guid, NULL, &data_size, data);
272 if (status == EFI_SUCCESS) {
273 acpi_status ret = acpi_load_table(data, NULL);
274 if (ret)
275 pr_err("failed to load table: %u\n", ret);
276 else
277 continue;
278 } else {
279 pr_err("failed to get var data: 0x%lx\n", status);
280 }
281 kfree(data);
282 }
283 return 0;
284}
285#else
286static inline int efivar_ssdt_load(void) { return 0; }
287#endif
288
289#ifdef CONFIG_DEBUG_FS
290
291#define EFI_DEBUGFS_MAX_BLOBS 32
292
293static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
294
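/*
 * Expose the EFI boot services code/data regions as read-only blobs under
 * the "efi" debugfs directory so they can be inspected after boot.
 */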
295static void __init efi_debugfs_init(void)
296{
297 struct dentry *efi_debugfs;
298 efi_memory_desc_t *md;
299 char name[32];
300 int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
301 int i = 0;
302
303 efi_debugfs = debugfs_create_dir("efi", NULL);
304 if (IS_ERR_OR_NULL(efi_debugfs))
305 return;
306
307 for_each_efi_memory_desc(md) {
308 switch (md->type) {
309 case EFI_BOOT_SERVICES_CODE:
310 snprintf(name, sizeof(name), "boot_services_code%d",
311 type_count[md->type]++);
312 break;
313 case EFI_BOOT_SERVICES_DATA:
314 snprintf(name, sizeof(name), "boot_services_data%d",
315 type_count[md->type]++);
316 break;
317 default:
318 continue;
319 }
320
321 if (i >= EFI_DEBUGFS_MAX_BLOBS) {
322			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
323 EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
324 break;
325 }
326
327 debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
328 debugfs_blob[i].data = memremap(md->phys_addr,
329 debugfs_blob[i].size,
330 MEMREMAP_WB);
331 if (!debugfs_blob[i].data)
332 continue;
333
334 debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
335 i++;
336 }
337}
338#else
339static inline void efi_debugfs_init(void) {}
340#endif
341
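/*
 * Once the kernel's RNG is initialized, write a fresh seed into the
 * non-volatile "RandomSeed" EFI variable so that a new seed is available
 * to the boot stub on the next boot.
 */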
342static void refresh_nv_rng_seed(struct work_struct *work)
343{
344 u8 seed[EFI_RANDOM_SEED_SIZE];
345
346 get_random_bytes(seed, sizeof(seed));
347 efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID,
348 EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
349 EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed);
350 memzero_explicit(seed, sizeof(seed));
351}
352static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data)
353{
354 static DECLARE_WORK(work, refresh_nv_rng_seed);
355 schedule_work(&work);
356 return NOTIFY_DONE;
357}
358static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification };
359
360/*
361 * We register the efi subsystem with the firmware subsystem and the
362 * efivars subsystem with the efi subsystem, if the system was booted with
363 * EFI.
364 */
365static int __init efisubsys_init(void)
366{
367 int error;
368
369 if (!efi_enabled(EFI_RUNTIME_SERVICES))
370 efi.runtime_supported_mask = 0;
371
372 if (!efi_enabled(EFI_BOOT))
373 return 0;
374
375 if (efi.runtime_supported_mask) {
376 /*
377 * Since we process only one efi_runtime_service() at a time, an
378 * ordered workqueue (which creates only one execution context)
379 * should suffice for all our needs.
380 */
381 efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
382 if (!efi_rts_wq) {
383 pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
384 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
385 efi.runtime_supported_mask = 0;
386 return 0;
387 }
388 }
389
390 if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
391 platform_device_register_simple("rtc-efi", 0, NULL, 0);
392
393 /* We register the efi directory at /sys/firmware/efi */
394 efi_kobj = kobject_create_and_add("efi", firmware_kobj);
395 if (!efi_kobj) {
396 pr_err("efi: Firmware registration failed.\n");
397 error = -ENOMEM;
398 goto err_destroy_wq;
399 }
400
401 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
402 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
403 error = generic_ops_register();
404 if (error)
405 goto err_put;
406 efivar_ssdt_load();
407 platform_device_register_simple("efivars", 0, NULL, 0);
408 }
409
410 error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
411 if (error) {
412 pr_err("efi: Sysfs attribute export failed with error %d.\n",
413 error);
414 goto err_unregister;
415 }
416
417 /* and the standard mountpoint for efivarfs */
418 error = sysfs_create_mount_point(efi_kobj, "efivars");
419 if (error) {
420 pr_err("efivars: Subsystem registration failed.\n");
421 goto err_remove_group;
422 }
423
424 if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
425 efi_debugfs_init();
426
427#ifdef CONFIG_EFI_COCO_SECRET
428 if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
429 platform_device_register_simple("efi_secret", 0, NULL, 0);
430#endif
431
432 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
433 execute_with_initialized_rng(&refresh_nv_rng_seed_nb);
434
435 return 0;
436
437err_remove_group:
438 sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
439err_unregister:
440 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
441 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
442 generic_ops_unregister();
443err_put:
444 kobject_put(efi_kobj);
445 efi_kobj = NULL;
446err_destroy_wq:
447 if (efi_rts_wq)
448 destroy_workqueue(efi_rts_wq);
449
450 return error;
451}
452
453subsys_initcall(efisubsys_init);
454
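/*
 * Scan the EFI memory map for regions flagged EFI_MEMORY_MORE_RELIABLE and
 * mark them as mirrored in memblock, so mirrored memory can be preferred
 * for kernel allocations.
 */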
455void __init efi_find_mirror(void)
456{
457 efi_memory_desc_t *md;
458 u64 mirror_size = 0, total_size = 0;
459
460 if (!efi_enabled(EFI_MEMMAP))
461 return;
462
463 for_each_efi_memory_desc(md) {
464 unsigned long long start = md->phys_addr;
465 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
466
467 total_size += size;
468 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
469 memblock_mark_mirror(start, size);
470 mirror_size += size;
471 }
472 }
473 if (mirror_size)
474 pr_info("Memory: %lldM/%lldM mirrored memory\n",
475 mirror_size>>20, total_size>>20);
476}
477
478/*
479 * Find the efi memory descriptor for a given physical address. Given a
480 * physical address, determine if it exists within an EFI Memory Map entry,
481 * and if so, populate the supplied memory descriptor with the appropriate
482 * data.
483 */
484int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
485{
486 efi_memory_desc_t *md;
487
488 if (!efi_enabled(EFI_MEMMAP)) {
489 pr_err_once("EFI_MEMMAP is not enabled.\n");
490 return -EINVAL;
491 }
492
493 if (!out_md) {
494 pr_err_once("out_md is null.\n");
495 return -EINVAL;
496 }
497
498 for_each_efi_memory_desc(md) {
499 u64 size;
500 u64 end;
501
502 size = md->num_pages << EFI_PAGE_SHIFT;
503 end = md->phys_addr + size;
504 if (phys_addr >= md->phys_addr && phys_addr < end) {
505 memcpy(out_md, md, sizeof(*out_md));
506 return 0;
507 }
508 }
509 return -ENOENT;
510}
511
512/*
513 * Calculate the highest address of an efi memory descriptor.
514 */
515u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
516{
517 u64 size = md->num_pages << EFI_PAGE_SHIFT;
518 u64 end = md->phys_addr + size;
519 return end;
520}
521
522void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
523
524/**
525 * efi_mem_reserve - Reserve an EFI memory region
526 * @addr: Physical address to reserve
527 * @size: Size of reservation
528 *
529 * Mark a region as reserved from general kernel allocation and
530 * prevent it being released by efi_free_boot_services().
531 *
532 * This function should be called by drivers once they've parsed EFI
533 * configuration tables to figure out where their data lives, e.g.
534 * efi_esrt_init().
535 */
536void __init efi_mem_reserve(phys_addr_t addr, u64 size)
537{
538 if (!memblock_is_region_reserved(addr, size))
539 memblock_reserve(addr, size);
540
541	/*
542	 * Some architectures (x86) reserve all boot services ranges
543	 * until efi_free_boot_services() because of buggy firmware
544	 * implementations. This means the above memblock_reserve() is
545	 * superfluous on x86 and instead what it needs to do is
546	 * ensure that the region described by @addr and @size is not freed.
547	 */
548 efi_arch_mem_reserve(addr, size);
549}
550
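/*
 * Map of configuration table GUIDs to the variables that receive their
 * physical addresses; consumed by match_config_table() below.
 */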
551static const efi_config_table_type_t common_tables[] __initconst = {
552 {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
553 {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
554 {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
555 {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
556 {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
557 {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
558 {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
559 {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
560 {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
561 {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
562 {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
563 {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
564#ifdef CONFIG_EFI_RCI2_TABLE
565 {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
566#endif
567#ifdef CONFIG_LOAD_UEFI_KEYS
568 {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
569#endif
570#ifdef CONFIG_EFI_COCO_SECRET
571 {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
572#endif
573#ifdef CONFIG_EFI_GENERIC_STUB
574 {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
575#endif
576 {},
577};
578
579static __init int match_config_table(const efi_guid_t *guid,
580 unsigned long table,
581 const efi_config_table_type_t *table_types)
582{
583 int i;
584
585 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
586 if (!efi_guidcmp(*guid, table_types[i].guid)) {
587 *(table_types[i].ptr) = table;
588 if (table_types[i].name[0])
589 pr_cont("%s=0x%lx ",
590 table_types[i].name, table);
591 return 1;
592 }
593 }
594
595 return 0;
596}
597
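/*
 * Walk the firmware's configuration table array (in its native 32-bit or
 * 64-bit layout), record the tables we recognize and parse the Linux
 * specific ones (RNG seed, MEMRESERVE, RTPROP, initrd).
 */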
598int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
599 int count,
600 const efi_config_table_type_t *arch_tables)
601{
602 const efi_config_table_64_t *tbl64 = (void *)config_tables;
603 const efi_config_table_32_t *tbl32 = (void *)config_tables;
604 const efi_guid_t *guid;
605 unsigned long table;
606 int i;
607
608 pr_info("");
609 for (i = 0; i < count; i++) {
610 if (!IS_ENABLED(CONFIG_X86)) {
611 guid = &config_tables[i].guid;
612 table = (unsigned long)config_tables[i].table;
613 } else if (efi_enabled(EFI_64BIT)) {
614 guid = &tbl64[i].guid;
615 table = tbl64[i].table;
616
617 if (IS_ENABLED(CONFIG_X86_32) &&
618 tbl64[i].table > U32_MAX) {
619 pr_cont("\n");
620 pr_err("Table located above 4GB, disabling EFI.\n");
621 return -EINVAL;
622 }
623 } else {
624 guid = &tbl32[i].guid;
625 table = tbl32[i].table;
626 }
627
628 if (!match_config_table(guid, table, common_tables) && arch_tables)
629 match_config_table(guid, table, arch_tables);
630 }
631 pr_cont("\n");
632 set_bit(EFI_CONFIG_TABLES, &efi.flags);
633
634 if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
635 struct linux_efi_random_seed *seed;
636 u32 size = 0;
637
638 seed = early_memremap(efi_rng_seed, sizeof(*seed));
639 if (seed != NULL) {
640 size = min_t(u32, seed->size, SZ_1K); // sanity check
641 early_memunmap(seed, sizeof(*seed));
642 } else {
643 pr_err("Could not map UEFI random seed!\n");
644 }
645 if (size > 0) {
646 seed = early_memremap(efi_rng_seed,
647 sizeof(*seed) + size);
648 if (seed != NULL) {
649 add_bootloader_randomness(seed->bits, size);
650 memzero_explicit(seed->bits, size);
651 early_memunmap(seed, sizeof(*seed) + size);
652 } else {
653 pr_err("Could not map UEFI random seed!\n");
654 }
655 }
656 }
657
658 if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
659 efi_memattr_init();
660
661 efi_tpm_eventlog_init();
662
663 if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
664 unsigned long prsv = mem_reserve;
665
666 while (prsv) {
667 struct linux_efi_memreserve *rsv;
668 u8 *p;
669
670 /*
671 * Just map a full page: that is what we will get
672 * anyway, and it permits us to map the entire entry
673 * before knowing its size.
674 */
675 p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
676 PAGE_SIZE);
677 if (p == NULL) {
678 pr_err("Could not map UEFI memreserve entry!\n");
679 return -ENOMEM;
680 }
681
682 rsv = (void *)(p + prsv % PAGE_SIZE);
683
684 /* reserve the entry itself */
685 memblock_reserve(prsv,
686 struct_size(rsv, entry, rsv->size));
687
688 for (i = 0; i < atomic_read(&rsv->count); i++) {
689 memblock_reserve(rsv->entry[i].base,
690 rsv->entry[i].size);
691 }
692
693 prsv = rsv->next;
694 early_memunmap(p, PAGE_SIZE);
695 }
696 }
697
698 if (rt_prop != EFI_INVALID_TABLE_ADDR) {
699 efi_rt_properties_table_t *tbl;
700
701 tbl = early_memremap(rt_prop, sizeof(*tbl));
702 if (tbl) {
703 efi.runtime_supported_mask &= tbl->runtime_services_supported;
704 early_memunmap(tbl, sizeof(*tbl));
705 }
706 }
707
708 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
709 initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
710 struct linux_efi_initrd *tbl;
711
712 tbl = early_memremap(initrd, sizeof(*tbl));
713 if (tbl) {
714 phys_initrd_start = tbl->base;
715 phys_initrd_size = tbl->size;
716 early_memunmap(tbl, sizeof(*tbl));
717 }
718 }
719
720 return 0;
721}
722
723int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
724 int min_major_version)
725{
726 if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
727 pr_err("System table signature incorrect!\n");
728 return -EINVAL;
729 }
730
731 if ((systab_hdr->revision >> 16) < min_major_version)
732 pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
733 systab_hdr->revision >> 16,
734 systab_hdr->revision & 0xffff,
735 min_major_version);
736
737 return 0;
738}
739
740#ifndef CONFIG_IA64
741static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
742 size_t size)
743{
744 const efi_char16_t *ret;
745
746 ret = early_memremap_ro(fw_vendor, size);
747 if (!ret)
748 pr_err("Could not map the firmware vendor!\n");
749 return ret;
750}
751
752static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
753{
754 early_memunmap((void *)fw_vendor, size);
755}
756#else
757#define map_fw_vendor(p, s) __va(p)
758#define unmap_fw_vendor(v, s)
759#endif
760
761void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
762 unsigned long fw_vendor)
763{
764 char vendor[100] = "unknown";
765 const efi_char16_t *c16;
766 size_t i;
767
768 c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
769 if (c16) {
770 for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
771 vendor[i] = c16[i];
772 vendor[i] = '\0';
773
774 unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
775 }
776
777 pr_info("EFI v%u.%.02u by %s\n",
778 systab_hdr->revision >> 16,
779 systab_hdr->revision & 0xffff,
780 vendor);
781
782 if (IS_ENABLED(CONFIG_X86_64) &&
783 systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
784 !strcmp(vendor, "Apple")) {
785 pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
786 efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
787 }
788}
789
790static __initdata char memory_type_name[][13] = {
791 "Reserved",
792 "Loader Code",
793 "Loader Data",
794 "Boot Code",
795 "Boot Data",
796 "Runtime Code",
797 "Runtime Data",
798 "Conventional",
799 "Unusable",
800 "ACPI Reclaim",
801 "ACPI Mem NVS",
802 "MMIO",
803 "MMIO Port",
804 "PAL Code",
805 "Persistent",
806};
807
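/*
 * Render the type and attribute bits of a memory descriptor as a short
 * human-readable string, as used when dumping the EFI memory map.
 */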
808char * __init efi_md_typeattr_format(char *buf, size_t size,
809 const efi_memory_desc_t *md)
810{
811 char *pos;
812 int type_len;
813 u64 attr;
814
815 pos = buf;
816 if (md->type >= ARRAY_SIZE(memory_type_name))
817 type_len = snprintf(pos, size, "[type=%u", md->type);
818 else
819 type_len = snprintf(pos, size, "[%-*s",
820 (int)(sizeof(memory_type_name[0]) - 1),
821 memory_type_name[md->type]);
822 if (type_len >= size)
823 return buf;
824
825 pos += type_len;
826 size -= type_len;
827
828 attr = md->attribute;
829 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
830 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
831 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
832 EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
833 EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
834 snprintf(pos, size, "|attr=0x%016llx]",
835 (unsigned long long)attr);
836 else
837 snprintf(pos, size,
838 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
839 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
840 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
841 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
842 attr & EFI_MEMORY_SP ? "SP" : "",
843 attr & EFI_MEMORY_NV ? "NV" : "",
844 attr & EFI_MEMORY_XP ? "XP" : "",
845 attr & EFI_MEMORY_RP ? "RP" : "",
846 attr & EFI_MEMORY_WP ? "WP" : "",
847 attr & EFI_MEMORY_RO ? "RO" : "",
848 attr & EFI_MEMORY_UCE ? "UCE" : "",
849 attr & EFI_MEMORY_WB ? "WB" : "",
850 attr & EFI_MEMORY_WT ? "WT" : "",
851 attr & EFI_MEMORY_WC ? "WC" : "",
852 attr & EFI_MEMORY_UC ? "UC" : "");
853 return buf;
854}
855
856/*
857 * IA64 has a funky EFI memory map that doesn't work the same way as
858 * other architectures.
859 */
860#ifndef CONFIG_IA64
861/*
862 * efi_mem_attributes - lookup memmap attributes for physical address
863 * @phys_addr: the physical address to lookup
864 *
865 * Search in the EFI memory map for the region covering
866 * @phys_addr. Returns the EFI memory attributes if the region
867 * was found in the memory map, 0 otherwise.
868 */
869u64 efi_mem_attributes(unsigned long phys_addr)
870{
871 efi_memory_desc_t *md;
872
873 if (!efi_enabled(EFI_MEMMAP))
874 return 0;
875
876 for_each_efi_memory_desc(md) {
877 if ((md->phys_addr <= phys_addr) &&
878 (phys_addr < (md->phys_addr +
879 (md->num_pages << EFI_PAGE_SHIFT))))
880 return md->attribute;
881 }
882 return 0;
883}
884
885/*
886 * efi_mem_type - lookup memmap type for physical address
887 * @phys_addr: the physical address to lookup
888 *
889 * Search in the EFI memory map for the region covering @phys_addr.
890 * Returns the EFI memory type if the region was found in the memory
891 * map, -EINVAL otherwise.
892 */
893int efi_mem_type(unsigned long phys_addr)
894{
895 const efi_memory_desc_t *md;
896
897 if (!efi_enabled(EFI_MEMMAP))
898 return -ENOTSUPP;
899
900 for_each_efi_memory_desc(md) {
901 if ((md->phys_addr <= phys_addr) &&
902 (phys_addr < (md->phys_addr +
903 (md->num_pages << EFI_PAGE_SHIFT))))
904 return md->type;
905 }
906 return -EINVAL;
907}
908#endif
909
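/* Translate an EFI status code into the closest matching Linux errno. */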
910int efi_status_to_err(efi_status_t status)
911{
912 int err;
913
914 switch (status) {
915 case EFI_SUCCESS:
916 err = 0;
917 break;
918 case EFI_INVALID_PARAMETER:
919 err = -EINVAL;
920 break;
921 case EFI_OUT_OF_RESOURCES:
922 err = -ENOSPC;
923 break;
924 case EFI_DEVICE_ERROR:
925 err = -EIO;
926 break;
927 case EFI_WRITE_PROTECTED:
928 err = -EROFS;
929 break;
930 case EFI_SECURITY_VIOLATION:
931 err = -EACCES;
932 break;
933 case EFI_NOT_FOUND:
934 err = -ENOENT;
935 break;
936 case EFI_ABORTED:
937 err = -EINTR;
938 break;
939 default:
940 err = -EINVAL;
941 }
942
943 return err;
944}
945EXPORT_SYMBOL_GPL(efi_status_to_err);
946
947static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
948static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
949
950static int __init efi_memreserve_map_root(void)
951{
952 if (mem_reserve == EFI_INVALID_TABLE_ADDR)
953 return -ENODEV;
954
955 efi_memreserve_root = memremap(mem_reserve,
956 sizeof(*efi_memreserve_root),
957 MEMREMAP_WB);
958 if (WARN_ON_ONCE(!efi_memreserve_root))
959 return -ENOMEM;
960 return 0;
961}
962
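/*
 * Insert a "reserved" resource into the iomem tree, nested inside the
 * conflicting "System RAM" range, so the reservation is visible in
 * /proc/iomem.
 */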
963static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
964{
965 struct resource *res, *parent;
966 int ret;
967
968 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
969 if (!res)
970 return -ENOMEM;
971
972 res->name = "reserved";
973 res->flags = IORESOURCE_MEM;
974 res->start = addr;
975 res->end = addr + size - 1;
976
977 /* we expect a conflict with a 'System RAM' region */
978 parent = request_resource_conflict(&iomem_resource, res);
979 ret = parent ? request_resource(parent, res) : 0;
980
981 /*
982 * Given that efi_mem_reserve_iomem() can be called at any
983 * time, only call memblock_reserve() if the architecture
984 * keeps the infrastructure around.
985 */
986 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
987 memblock_reserve(addr, size);
988
989 return ret;
990}
991
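/*
 * Record a reservation in the MEMRESERVE linked list published via a
 * configuration table, so that the region survives kexec: the next kernel
 * re-reserves every entry when it parses that table.
 */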
992int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
993{
994 struct linux_efi_memreserve *rsv;
995 unsigned long prsv;
996 int rc, index;
997
998 if (efi_memreserve_root == (void *)ULONG_MAX)
999 return -ENODEV;
1000
1001 if (!efi_memreserve_root) {
1002 rc = efi_memreserve_map_root();
1003 if (rc)
1004 return rc;
1005 }
1006
1007 /* first try to find a slot in an existing linked list entry */
1008 for (prsv = efi_memreserve_root->next; prsv; ) {
1009 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1010 if (!rsv)
1011 return -ENOMEM;
1012 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1013 if (index < rsv->size) {
1014 rsv->entry[index].base = addr;
1015 rsv->entry[index].size = size;
1016
1017 memunmap(rsv);
1018 return efi_mem_reserve_iomem(addr, size);
1019 }
1020 prsv = rsv->next;
1021 memunmap(rsv);
1022 }
1023
1024 /* no slot found - allocate a new linked list entry */
1025 rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1026 if (!rsv)
1027 return -ENOMEM;
1028
1029 rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1030 if (rc) {
1031 free_page((unsigned long)rsv);
1032 return rc;
1033 }
1034
1035 /*
1036 * The memremap() call above assumes that a linux_efi_memreserve entry
1037 * never crosses a page boundary, so let's ensure that this remains true
1038 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1039 * using SZ_4K explicitly in the size calculation below.
1040 */
1041 rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1042 atomic_set(&rsv->count, 1);
1043 rsv->entry[0].base = addr;
1044 rsv->entry[0].size = size;
1045
1046 spin_lock(&efi_mem_reserve_persistent_lock);
1047 rsv->next = efi_memreserve_root->next;
1048 efi_memreserve_root->next = __pa(rsv);
1049 spin_unlock(&efi_mem_reserve_persistent_lock);
1050
1051 return efi_mem_reserve_iomem(addr, size);
1052}
1053
1054static int __init efi_memreserve_root_init(void)
1055{
1056 if (efi_memreserve_root)
1057 return 0;
1058 if (efi_memreserve_map_root())
1059 efi_memreserve_root = (void *)ULONG_MAX;
1060 return 0;
1061}
1062early_initcall(efi_memreserve_root_init);
1063
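/*
 * When kexec'ing, overwrite the seed exposed through the RNG configuration
 * table with fresh random bytes so the next kernel does not boot with a
 * seed that has already been credited.
 */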
1064#ifdef CONFIG_KEXEC
1065static int update_efi_random_seed(struct notifier_block *nb,
1066 unsigned long code, void *unused)
1067{
1068 struct linux_efi_random_seed *seed;
1069 u32 size = 0;
1070
1071 if (!kexec_in_progress)
1072 return NOTIFY_DONE;
1073
1074 seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1075 if (seed != NULL) {
1076 size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1077 memunmap(seed);
1078 } else {
1079 pr_err("Could not map UEFI random seed!\n");
1080 }
1081 if (size > 0) {
1082 seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1083 MEMREMAP_WB);
1084 if (seed != NULL) {
1085 seed->size = size;
1086 get_random_bytes(seed->bits, seed->size);
1087 memunmap(seed);
1088 } else {
1089 pr_err("Could not map UEFI random seed!\n");
1090 }
1091 }
1092 return NOTIFY_DONE;
1093}
1094
1095static struct notifier_block efi_random_seed_nb = {
1096 .notifier_call = update_efi_random_seed,
1097};
1098
1099static int __init register_update_efi_random_seed(void)
1100{
1101 if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1102 return 0;
1103 return register_reboot_notifier(&efi_random_seed_nb);
1104}
1105late_initcall(register_update_efi_random_seed);
1106#endif