v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * efi.c - EFI subsystem
   4 *
   5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
   6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
   7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
   8 *
   9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
  10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
   11 * The existence of /sys/firmware/efi may also be used by userspace to
  12 * determine that the system supports EFI.
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/kobject.h>
  18#include <linux/module.h>
  19#include <linux/init.h>
  20#include <linux/debugfs.h>
  21#include <linux/device.h>
  22#include <linux/efi.h>
  23#include <linux/of.h>
  24#include <linux/io.h>
  25#include <linux/kexec.h>
  26#include <linux/platform_device.h>
  27#include <linux/random.h>
  28#include <linux/reboot.h>
  29#include <linux/slab.h>
  30#include <linux/acpi.h>
  31#include <linux/ucs2_string.h>
  32#include <linux/memblock.h>
  33#include <linux/security.h>
  34
  35#include <asm/early_ioremap.h>
  36
  37struct efi __read_mostly efi = {
  38	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
  39	.acpi			= EFI_INVALID_TABLE_ADDR,
  40	.acpi20			= EFI_INVALID_TABLE_ADDR,
  41	.smbios			= EFI_INVALID_TABLE_ADDR,
  42	.smbios3		= EFI_INVALID_TABLE_ADDR,
  43	.esrt			= EFI_INVALID_TABLE_ADDR,
  44	.tpm_log		= EFI_INVALID_TABLE_ADDR,
  45	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
  46};
  47EXPORT_SYMBOL(efi);
  48
  49unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
  50static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
  51static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
  52
  53struct mm_struct efi_mm = {
  54	.mm_rb			= RB_ROOT,
  55	.mm_users		= ATOMIC_INIT(2),
  56	.mm_count		= ATOMIC_INIT(1),
  57	MMAP_LOCK_INITIALIZER(efi_mm)
  58	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
  59	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
  60	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
  61};
  62
  63struct workqueue_struct *efi_rts_wq;
  64
  65static bool disable_runtime;
  66static int __init setup_noefi(char *arg)
  67{
  68	disable_runtime = true;
  69	return 0;
  70}
  71early_param("noefi", setup_noefi);
  72
  73bool efi_runtime_disabled(void)
  74{
  75	return disable_runtime;
  76}
  77
  78bool __pure __efi_soft_reserve_enabled(void)
  79{
  80	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
  81}
  82
  83static int __init parse_efi_cmdline(char *str)
  84{
  85	if (!str) {
  86		pr_warn("need at least one option\n");
  87		return -EINVAL;
  88	}
  89
  90	if (parse_option_str(str, "debug"))
  91		set_bit(EFI_DBG, &efi.flags);
  92
  93	if (parse_option_str(str, "noruntime"))
  94		disable_runtime = true;
  95
  96	if (parse_option_str(str, "nosoftreserve"))
  97		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
  98
  99	return 0;
 100}
 101early_param("efi", parse_efi_cmdline);
 102
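/*
 * Illustrative note, not part of the file above: given the options handled
 * by setup_noefi() and parse_efi_cmdline(), a kernel command line such as
 *
 *     efi=debug,nosoftreserve noefi
 *
 * would set EFI_DBG and EFI_MEM_NO_SOFT_RESERVE in efi.flags and disable
 * the EFI runtime services, respectively.
 */
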
 103struct kobject *efi_kobj;
 104
 105/*
 106 * Let's not leave out systab information that snuck into
 107 * the efivars driver
 108 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 109 * one value per file rule!
 110 */
 111static ssize_t systab_show(struct kobject *kobj,
 112			   struct kobj_attribute *attr, char *buf)
 113{
 114	char *str = buf;
 115
 116	if (!kobj || !buf)
 117		return -EINVAL;
 118
 119	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
 120		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
 121	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
 122		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
 123	/*
 124	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
 125	 * SMBIOS3 entry point shall be preferred, so we list it first to
 126	 * let applications stop parsing after the first match.
 127	 */
 128	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
 129		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
 130	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
 131		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
 132
 133	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
 134		str = efi_systab_show_arch(str);
 135
 136	return str - buf;
 137}
 138
 139static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 140
 141static ssize_t fw_platform_size_show(struct kobject *kobj,
 142				     struct kobj_attribute *attr, char *buf)
 143{
 144	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
 145}
 146
 147extern __weak struct kobj_attribute efi_attr_fw_vendor;
 148extern __weak struct kobj_attribute efi_attr_runtime;
 149extern __weak struct kobj_attribute efi_attr_config_table;
 150static struct kobj_attribute efi_attr_fw_platform_size =
 151	__ATTR_RO(fw_platform_size);
 152
 153static struct attribute *efi_subsys_attrs[] = {
 154	&efi_attr_systab.attr,
 155	&efi_attr_fw_platform_size.attr,
 156	&efi_attr_fw_vendor.attr,
 157	&efi_attr_runtime.attr,
 158	&efi_attr_config_table.attr,
 159	NULL,
 160};
 161
 162umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
 163				   int n)
 164{
 165	return attr->mode;
 166}
 167
 168static const struct attribute_group efi_subsys_attr_group = {
 169	.attrs = efi_subsys_attrs,
 170	.is_visible = efi_attr_is_visible,
 171};
 172
 173static struct efivars generic_efivars;
 174static struct efivar_operations generic_ops;
 175
 176static int generic_ops_register(void)
 177{
 178	generic_ops.get_variable = efi.get_variable;
 179	generic_ops.get_next_variable = efi.get_next_variable;
 180	generic_ops.query_variable_store = efi_query_variable_store;
 181
 182	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
 183		generic_ops.set_variable = efi.set_variable;
 184		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
 185	}
 186	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
 187}
 188
 189static void generic_ops_unregister(void)
 190{
 191	efivars_unregister(&generic_efivars);
 192}
 193
 194#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
 195#define EFIVAR_SSDT_NAME_MAX	16
 196static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
 197static int __init efivar_ssdt_setup(char *str)
 198{
 199	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
 200
 201	if (ret)
 202		return ret;
 203
 204	if (strlen(str) < sizeof(efivar_ssdt))
 205		memcpy(efivar_ssdt, str, strlen(str));
 206	else
 207		pr_warn("efivar_ssdt: name too long: %s\n", str);
 208	return 0;
 209}
 210__setup("efivar_ssdt=", efivar_ssdt_setup);
 211
 212static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
 213				   unsigned long name_size, void *data)
 214{
 215	struct efivar_entry *entry;
 216	struct list_head *list = data;
 217	char utf8_name[EFIVAR_SSDT_NAME_MAX];
 218	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
 219
 220	ucs2_as_utf8(utf8_name, name, limit - 1);
 221	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
 222		return 0;
 223
 224	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 225	if (!entry)
 226		return 0;
 227
 228	memcpy(entry->var.VariableName, name, name_size);
 229	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
 230
 231	efivar_entry_add(entry, list);
 232
 233	return 0;
 234}
 235
 236static __init int efivar_ssdt_load(void)
 237{
 238	LIST_HEAD(entries);
 239	struct efivar_entry *entry, *aux;
 240	unsigned long size;
 241	void *data;
 242	int ret;
 243
 244	if (!efivar_ssdt[0])
 245		return 0;
 246
 247	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 248
 249	list_for_each_entry_safe(entry, aux, &entries, list) {
 250		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
 251			&entry->var.VendorGuid);
 252
 253		list_del(&entry->list);
 254
 255		ret = efivar_entry_size(entry, &size);
 256		if (ret) {
 257			pr_err("failed to get var size\n");
 258			goto free_entry;
 259		}
 260
 261		data = kmalloc(size, GFP_KERNEL);
 262		if (!data) {
 263			ret = -ENOMEM;
 264			goto free_entry;
 265		}
 266
 267		ret = efivar_entry_get(entry, NULL, &size, data);
 268		if (ret) {
 269			pr_err("failed to get var data\n");
 270			goto free_data;
 271		}
 272
 273		ret = acpi_load_table(data, NULL);
 274		if (ret) {
 275			pr_err("failed to load table: %d\n", ret);
 276			goto free_data;
 277		}
 278
 279		goto free_entry;
 280
 281free_data:
 282		kfree(data);
 283
 284free_entry:
 285		kfree(entry);
 286	}
 287
 288	return ret;
 289}
 290#else
 291static inline int efivar_ssdt_load(void) { return 0; }
 292#endif
 293
 294#ifdef CONFIG_DEBUG_FS
 295
 296#define EFI_DEBUGFS_MAX_BLOBS 32
 297
 298static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
 299
 300static void __init efi_debugfs_init(void)
 301{
 302	struct dentry *efi_debugfs;
 303	efi_memory_desc_t *md;
 304	char name[32];
 305	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
 306	int i = 0;
 307
 308	efi_debugfs = debugfs_create_dir("efi", NULL);
 309	if (IS_ERR_OR_NULL(efi_debugfs))
 310		return;
 311
 312	for_each_efi_memory_desc(md) {
 313		switch (md->type) {
 314		case EFI_BOOT_SERVICES_CODE:
 315			snprintf(name, sizeof(name), "boot_services_code%d",
 316				 type_count[md->type]++);
 317			break;
 318		case EFI_BOOT_SERVICES_DATA:
 319			snprintf(name, sizeof(name), "boot_services_data%d",
 320				 type_count[md->type]++);
 321			break;
 322		default:
 323			continue;
 324		}
 325
 326		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
  327			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
 328				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
 329			break;
 330		}
 331
 332		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
 333		debugfs_blob[i].data = memremap(md->phys_addr,
 334						debugfs_blob[i].size,
 335						MEMREMAP_WB);
 336		if (!debugfs_blob[i].data)
 337			continue;
 338
 339		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
 340		i++;
 341	}
 342}
 343#else
 344static inline void efi_debugfs_init(void) {}
 345#endif
 346
 347/*
 348 * We register the efi subsystem with the firmware subsystem and the
 349 * efivars subsystem with the efi subsystem, if the system was booted with
 350 * EFI.
 351 */
 352static int __init efisubsys_init(void)
 353{
 354	int error;
 355
 356	if (!efi_enabled(EFI_RUNTIME_SERVICES))
 357		efi.runtime_supported_mask = 0;
 358
 359	if (!efi_enabled(EFI_BOOT))
 360		return 0;
 361
 362	if (efi.runtime_supported_mask) {
 363		/*
 364		 * Since we process only one efi_runtime_service() at a time, an
 365		 * ordered workqueue (which creates only one execution context)
 366		 * should suffice for all our needs.
 367		 */
 368		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
 369		if (!efi_rts_wq) {
 370			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
 371			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 372			efi.runtime_supported_mask = 0;
 373			return 0;
 374		}
 375	}
 376
 377	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
 378		platform_device_register_simple("rtc-efi", 0, NULL, 0);
 379
 380	/* We register the efi directory at /sys/firmware/efi */
 381	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
 382	if (!efi_kobj) {
 383		pr_err("efi: Firmware registration failed.\n");
 384		destroy_workqueue(efi_rts_wq);
 385		return -ENOMEM;
 386	}
 387
 388	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
 389				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
 390		efivar_ssdt_load();
 391		error = generic_ops_register();
 392		if (error)
 393			goto err_put;
 394		platform_device_register_simple("efivars", 0, NULL, 0);
 395	}
 396
 397	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
 398	if (error) {
 399		pr_err("efi: Sysfs attribute export failed with error %d.\n",
 400		       error);
 401		goto err_unregister;
 402	}
 403
 404	error = efi_runtime_map_init(efi_kobj);
 405	if (error)
 406		goto err_remove_group;
 407
 408	/* and the standard mountpoint for efivarfs */
 409	error = sysfs_create_mount_point(efi_kobj, "efivars");
 410	if (error) {
 411		pr_err("efivars: Subsystem registration failed.\n");
 412		goto err_remove_group;
 413	}
 414
 415	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
 416		efi_debugfs_init();
 417
 418	return 0;
 419
 420err_remove_group:
 421	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
 422err_unregister:
 423	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
 424				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
 425		generic_ops_unregister();
 426err_put:
 427	kobject_put(efi_kobj);
 428	destroy_workqueue(efi_rts_wq);
 429	return error;
 430}
 431
 432subsys_initcall(efisubsys_init);
 433
 434/*
 435 * Find the efi memory descriptor for a given physical address.  Given a
 436 * physical address, determine if it exists within an EFI Memory Map entry,
 437 * and if so, populate the supplied memory descriptor with the appropriate
 438 * data.
 439 */
 440int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 441{
 442	efi_memory_desc_t *md;
 443
 444	if (!efi_enabled(EFI_MEMMAP)) {
 445		pr_err_once("EFI_MEMMAP is not enabled.\n");
 446		return -EINVAL;
 447	}
 448
 449	if (!out_md) {
 450		pr_err_once("out_md is null.\n");
 451		return -EINVAL;
 452        }
 453
 454	for_each_efi_memory_desc(md) {
 455		u64 size;
 456		u64 end;
 457
 458		size = md->num_pages << EFI_PAGE_SHIFT;
 459		end = md->phys_addr + size;
 460		if (phys_addr >= md->phys_addr && phys_addr < end) {
 461			memcpy(out_md, md, sizeof(*out_md));
 462			return 0;
 463		}
 464	}
 465	return -ENOENT;
 466}
 467
 468/*
 469 * Calculate the highest address of an efi memory descriptor.
 470 */
 471u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
 472{
 473	u64 size = md->num_pages << EFI_PAGE_SHIFT;
 474	u64 end = md->phys_addr + size;
 475	return end;
 476}
 477
 478void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
 479
 480/**
 481 * efi_mem_reserve - Reserve an EFI memory region
 482 * @addr: Physical address to reserve
 483 * @size: Size of reservation
 484 *
 485 * Mark a region as reserved from general kernel allocation and
 486 * prevent it being released by efi_free_boot_services().
 487 *
  488 * This function should be called by drivers once they've parsed EFI
 489 * configuration tables to figure out where their data lives, e.g.
 490 * efi_esrt_init().
 491 */
 492void __init efi_mem_reserve(phys_addr_t addr, u64 size)
 493{
 494	if (!memblock_is_region_reserved(addr, size))
 495		memblock_reserve(addr, size);
 496
 497	/*
 498	 * Some architectures (x86) reserve all boot services ranges
 499	 * until efi_free_boot_services() because of buggy firmware
 500	 * implementations. This means the above memblock_reserve() is
 501	 * superfluous on x86 and instead what it needs to do is
 502	 * ensure the @start, @size is not freed.
 503	 */
 504	efi_arch_mem_reserve(addr, size);
 505}
 506
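/*
 * Illustrative sketch, not part of the file above: how a driver that has
 * located its data through an EFI configuration table might pin that
 * region, in the spirit of efi_esrt_init() mentioned in the comment on
 * efi_mem_reserve(). The table address and size below are hypothetical.
 */
static void __init example_reserve_fw_table(phys_addr_t table_phys, u64 table_size)
{
	efi_memory_desc_t md;

	/* Check that the address is actually covered by the EFI memory map. */
	if (efi_mem_desc_lookup(table_phys, &md))
		return;

	/* Keep the region from being released by efi_free_boot_services(). */
	efi_mem_reserve(table_phys, table_size);
}
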
 507static const efi_config_table_type_t common_tables[] __initconst = {
 508	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
 509	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
 510	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
 511	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
 512	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
 513	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
 514	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
 515	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
 516	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
 517	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
 518	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
 519#ifdef CONFIG_EFI_RCI2_TABLE
 520	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
 521#endif
 522	{},
 523};
 524
 525static __init int match_config_table(const efi_guid_t *guid,
 526				     unsigned long table,
 527				     const efi_config_table_type_t *table_types)
 528{
 529	int i;
 530
 531	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
 532		if (!efi_guidcmp(*guid, table_types[i].guid)) {
 533			*(table_types[i].ptr) = table;
 534			if (table_types[i].name[0])
 535				pr_cont("%s=0x%lx ",
 536					table_types[i].name, table);
 537			return 1;
 538		}
 539	}
 540
 541	return 0;
 542}
 543
 544int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
 545				   int count,
 546				   const efi_config_table_type_t *arch_tables)
 547{
 548	const efi_config_table_64_t *tbl64 = (void *)config_tables;
 549	const efi_config_table_32_t *tbl32 = (void *)config_tables;
 550	const efi_guid_t *guid;
 551	unsigned long table;
 552	int i;
 553
 554	pr_info("");
 555	for (i = 0; i < count; i++) {
 556		if (!IS_ENABLED(CONFIG_X86)) {
 557			guid = &config_tables[i].guid;
 558			table = (unsigned long)config_tables[i].table;
 559		} else if (efi_enabled(EFI_64BIT)) {
 560			guid = &tbl64[i].guid;
 561			table = tbl64[i].table;
 562
 563			if (IS_ENABLED(CONFIG_X86_32) &&
 564			    tbl64[i].table > U32_MAX) {
 565				pr_cont("\n");
 566				pr_err("Table located above 4GB, disabling EFI.\n");
 567				return -EINVAL;
 568			}
 569		} else {
 570			guid = &tbl32[i].guid;
 571			table = tbl32[i].table;
 572		}
 573
 574		if (!match_config_table(guid, table, common_tables) && arch_tables)
 575			match_config_table(guid, table, arch_tables);
 576	}
 577	pr_cont("\n");
 578	set_bit(EFI_CONFIG_TABLES, &efi.flags);
 579
 580	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
 581		struct linux_efi_random_seed *seed;
 582		u32 size = 0;
 583
 584		seed = early_memremap(efi_rng_seed, sizeof(*seed));
 585		if (seed != NULL) {
 586			size = READ_ONCE(seed->size);
 587			early_memunmap(seed, sizeof(*seed));
 588		} else {
 589			pr_err("Could not map UEFI random seed!\n");
 590		}
 591		if (size > 0) {
 592			seed = early_memremap(efi_rng_seed,
 593					      sizeof(*seed) + size);
 594			if (seed != NULL) {
 595				pr_notice("seeding entropy pool\n");
 596				add_bootloader_randomness(seed->bits, size);
 597				early_memunmap(seed, sizeof(*seed) + size);
 598			} else {
 599				pr_err("Could not map UEFI random seed!\n");
 600			}
 601		}
 602	}
 603
 604	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
 605		efi_memattr_init();
 606
 607	efi_tpm_eventlog_init();
 608
 609	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
 610		unsigned long prsv = mem_reserve;
 611
 612		while (prsv) {
 613			struct linux_efi_memreserve *rsv;
 614			u8 *p;
 615
 616			/*
 617			 * Just map a full page: that is what we will get
 618			 * anyway, and it permits us to map the entire entry
 619			 * before knowing its size.
 620			 */
 621			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
 622					   PAGE_SIZE);
 623			if (p == NULL) {
 624				pr_err("Could not map UEFI memreserve entry!\n");
 625				return -ENOMEM;
 626			}
 627
 628			rsv = (void *)(p + prsv % PAGE_SIZE);
 629
 630			/* reserve the entry itself */
 631			memblock_reserve(prsv,
 632					 struct_size(rsv, entry, rsv->size));
 633
 634			for (i = 0; i < atomic_read(&rsv->count); i++) {
 635				memblock_reserve(rsv->entry[i].base,
 636						 rsv->entry[i].size);
 637			}
 638
 639			prsv = rsv->next;
 640			early_memunmap(p, PAGE_SIZE);
 641		}
 642	}
 643
 644	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
 645		efi_rt_properties_table_t *tbl;
 646
 647		tbl = early_memremap(rt_prop, sizeof(*tbl));
 648		if (tbl) {
 649			efi.runtime_supported_mask &= tbl->runtime_services_supported;
 650			early_memunmap(tbl, sizeof(*tbl));
 651		}
 652	}
 653
 654	return 0;
 655}
 656
 657int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
 658				   int min_major_version)
 659{
 660	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
 661		pr_err("System table signature incorrect!\n");
 662		return -EINVAL;
 663	}
 664
 665	if ((systab_hdr->revision >> 16) < min_major_version)
 666		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
 667		       systab_hdr->revision >> 16,
 668		       systab_hdr->revision & 0xffff,
 669		       min_major_version);
 670
 671	return 0;
 672}
 673
 674#ifndef CONFIG_IA64
 675static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
 676						size_t size)
 677{
 678	const efi_char16_t *ret;
 679
 680	ret = early_memremap_ro(fw_vendor, size);
 681	if (!ret)
 682		pr_err("Could not map the firmware vendor!\n");
 683	return ret;
 684}
 685
 686static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
 687{
 688	early_memunmap((void *)fw_vendor, size);
 689}
 690#else
 691#define map_fw_vendor(p, s)	__va(p)
 692#define unmap_fw_vendor(v, s)
 693#endif
 694
 695void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
 696				     unsigned long fw_vendor)
 697{
 698	char vendor[100] = "unknown";
 699	const efi_char16_t *c16;
 700	size_t i;
 701
 702	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
 703	if (c16) {
 704		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
 705			vendor[i] = c16[i];
 706		vendor[i] = '\0';
 707
 708		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
 709	}
 710
 711	pr_info("EFI v%u.%.02u by %s\n",
 712		systab_hdr->revision >> 16,
 713		systab_hdr->revision & 0xffff,
 714		vendor);
 715}
 716
 717static __initdata char memory_type_name[][20] = {
 718	"Reserved",
 719	"Loader Code",
 720	"Loader Data",
 721	"Boot Code",
 722	"Boot Data",
 723	"Runtime Code",
 724	"Runtime Data",
 725	"Conventional Memory",
 726	"Unusable Memory",
 727	"ACPI Reclaim Memory",
 728	"ACPI Memory NVS",
 729	"Memory Mapped I/O",
 730	"MMIO Port Space",
 731	"PAL Code",
 732	"Persistent Memory",
 733};
 734
 735char * __init efi_md_typeattr_format(char *buf, size_t size,
 736				     const efi_memory_desc_t *md)
 737{
 738	char *pos;
 739	int type_len;
 740	u64 attr;
 741
 742	pos = buf;
 743	if (md->type >= ARRAY_SIZE(memory_type_name))
 744		type_len = snprintf(pos, size, "[type=%u", md->type);
 745	else
 746		type_len = snprintf(pos, size, "[%-*s",
 747				    (int)(sizeof(memory_type_name[0]) - 1),
 748				    memory_type_name[md->type]);
 749	if (type_len >= size)
 750		return buf;
 751
 752	pos += type_len;
 753	size -= type_len;
 754
 755	attr = md->attribute;
 756	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
 757		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
 758		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
 759		     EFI_MEMORY_NV | EFI_MEMORY_SP |
 760		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
 761		snprintf(pos, size, "|attr=0x%016llx]",
 762			 (unsigned long long)attr);
 763	else
 764		snprintf(pos, size,
 765			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
 766			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
 767			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
 768			 attr & EFI_MEMORY_SP      ? "SP"  : "",
 769			 attr & EFI_MEMORY_NV      ? "NV"  : "",
 770			 attr & EFI_MEMORY_XP      ? "XP"  : "",
 771			 attr & EFI_MEMORY_RP      ? "RP"  : "",
 772			 attr & EFI_MEMORY_WP      ? "WP"  : "",
 773			 attr & EFI_MEMORY_RO      ? "RO"  : "",
 774			 attr & EFI_MEMORY_UCE     ? "UCE" : "",
 775			 attr & EFI_MEMORY_WB      ? "WB"  : "",
 776			 attr & EFI_MEMORY_WT      ? "WT"  : "",
 777			 attr & EFI_MEMORY_WC      ? "WC"  : "",
 778			 attr & EFI_MEMORY_UC      ? "UC"  : "");
 779	return buf;
 780}
 781
 782/*
 783 * IA64 has a funky EFI memory map that doesn't work the same way as
 784 * other architectures.
 785 */
 786#ifndef CONFIG_IA64
 787/*
 788 * efi_mem_attributes - lookup memmap attributes for physical address
 789 * @phys_addr: the physical address to lookup
 790 *
 791 * Search in the EFI memory map for the region covering
 792 * @phys_addr. Returns the EFI memory attributes if the region
 793 * was found in the memory map, 0 otherwise.
 794 */
 795u64 efi_mem_attributes(unsigned long phys_addr)
 796{
 797	efi_memory_desc_t *md;
 798
 799	if (!efi_enabled(EFI_MEMMAP))
 800		return 0;
 801
 802	for_each_efi_memory_desc(md) {
 803		if ((md->phys_addr <= phys_addr) &&
 804		    (phys_addr < (md->phys_addr +
 805		    (md->num_pages << EFI_PAGE_SHIFT))))
 806			return md->attribute;
 807	}
 808	return 0;
 809}
 810
 811/*
 812 * efi_mem_type - lookup memmap type for physical address
 813 * @phys_addr: the physical address to lookup
 814 *
 815 * Search in the EFI memory map for the region covering @phys_addr.
 816 * Returns the EFI memory type if the region was found in the memory
 817 * map, -EINVAL otherwise.
 818 */
 819int efi_mem_type(unsigned long phys_addr)
 820{
 821	const efi_memory_desc_t *md;
 822
 823	if (!efi_enabled(EFI_MEMMAP))
 824		return -ENOTSUPP;
 825
 826	for_each_efi_memory_desc(md) {
 827		if ((md->phys_addr <= phys_addr) &&
 828		    (phys_addr < (md->phys_addr +
 829				  (md->num_pages << EFI_PAGE_SHIFT))))
 830			return md->type;
 831	}
 832	return -EINVAL;
 833}
 834#endif
 835
 836int efi_status_to_err(efi_status_t status)
 837{
 838	int err;
 839
 840	switch (status) {
 841	case EFI_SUCCESS:
 842		err = 0;
 843		break;
 844	case EFI_INVALID_PARAMETER:
 845		err = -EINVAL;
 846		break;
 847	case EFI_OUT_OF_RESOURCES:
 848		err = -ENOSPC;
 849		break;
 850	case EFI_DEVICE_ERROR:
 851		err = -EIO;
 852		break;
 853	case EFI_WRITE_PROTECTED:
 854		err = -EROFS;
 855		break;
 856	case EFI_SECURITY_VIOLATION:
 857		err = -EACCES;
 858		break;
 859	case EFI_NOT_FOUND:
 860		err = -ENOENT;
 861		break;
 862	case EFI_ABORTED:
 863		err = -EINTR;
 864		break;
 865	default:
 866		err = -EINVAL;
 867	}
 868
 869	return err;
 870}
 871
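/*
 * Illustrative sketch, not part of the file above: translating the raw
 * EFI_* status of a runtime service call into a normal errno value with
 * efi_status_to_err(). The wrapper name and arguments are hypothetical.
 */
static int example_get_variable(efi_char16_t *name, efi_guid_t *vendor,
				unsigned long *data_size, void *data)
{
	efi_status_t status;

	status = efi.get_variable(name, vendor, NULL, data_size, data);
	return efi_status_to_err(status);
}
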
 872static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
 873static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
 874
 875static int __init efi_memreserve_map_root(void)
 876{
 877	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
 878		return -ENODEV;
 879
 880	efi_memreserve_root = memremap(mem_reserve,
 881				       sizeof(*efi_memreserve_root),
 882				       MEMREMAP_WB);
 883	if (WARN_ON_ONCE(!efi_memreserve_root))
 884		return -ENOMEM;
 885	return 0;
 886}
 887
 888static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
 889{
 890	struct resource *res, *parent;
 891
 892	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
 893	if (!res)
 894		return -ENOMEM;
 895
 896	res->name	= "reserved";
 897	res->flags	= IORESOURCE_MEM;
 898	res->start	= addr;
 899	res->end	= addr + size - 1;
 900
 901	/* we expect a conflict with a 'System RAM' region */
 902	parent = request_resource_conflict(&iomem_resource, res);
 903	return parent ? request_resource(parent, res) : 0;
 904}
 905
 906int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 907{
 908	struct linux_efi_memreserve *rsv;
 909	unsigned long prsv;
 910	int rc, index;
 911
 912	if (efi_memreserve_root == (void *)ULONG_MAX)
 913		return -ENODEV;
 914
 915	if (!efi_memreserve_root) {
 916		rc = efi_memreserve_map_root();
 917		if (rc)
 918			return rc;
 919	}
 920
 921	/* first try to find a slot in an existing linked list entry */
 922	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
 923		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
 924		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
 925		if (index < rsv->size) {
 926			rsv->entry[index].base = addr;
 927			rsv->entry[index].size = size;
 928
 929			memunmap(rsv);
 930			return efi_mem_reserve_iomem(addr, size);
 931		}
 932		memunmap(rsv);
 933	}
 934
 935	/* no slot found - allocate a new linked list entry */
 936	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
 937	if (!rsv)
 938		return -ENOMEM;
 939
 940	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
 941	if (rc) {
 942		free_page((unsigned long)rsv);
 943		return rc;
 944	}
 945
 946	/*
 947	 * The memremap() call above assumes that a linux_efi_memreserve entry
 948	 * never crosses a page boundary, so let's ensure that this remains true
 949	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
 950	 * using SZ_4K explicitly in the size calculation below.
 951	 */
 952	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
 953	atomic_set(&rsv->count, 1);
 954	rsv->entry[0].base = addr;
 955	rsv->entry[0].size = size;
 956
 957	spin_lock(&efi_mem_reserve_persistent_lock);
 958	rsv->next = efi_memreserve_root->next;
 959	efi_memreserve_root->next = __pa(rsv);
 960	spin_unlock(&efi_mem_reserve_persistent_lock);
 961
 962	return efi_mem_reserve_iomem(addr, size);
 963}
 964
 965static int __init efi_memreserve_root_init(void)
 966{
 967	if (efi_memreserve_root)
 968		return 0;
 969	if (efi_memreserve_map_root())
 970		efi_memreserve_root = (void *)ULONG_MAX;
 971	return 0;
 972}
 973early_initcall(efi_memreserve_root_init);
 974
 975#ifdef CONFIG_KEXEC
 976static int update_efi_random_seed(struct notifier_block *nb,
 977				  unsigned long code, void *unused)
 978{
 979	struct linux_efi_random_seed *seed;
 980	u32 size = 0;
 981
 982	if (!kexec_in_progress)
 983		return NOTIFY_DONE;
 984
 985	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
 986	if (seed != NULL) {
 987		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
 988		memunmap(seed);
 989	} else {
 990		pr_err("Could not map UEFI random seed!\n");
 991	}
 992	if (size > 0) {
 993		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
 994				MEMREMAP_WB);
 995		if (seed != NULL) {
 996			seed->size = size;
 997			get_random_bytes(seed->bits, seed->size);
 998			memunmap(seed);
 999		} else {
1000			pr_err("Could not map UEFI random seed!\n");
1001		}
1002	}
1003	return NOTIFY_DONE;
1004}
1005
1006static struct notifier_block efi_random_seed_nb = {
1007	.notifier_call = update_efi_random_seed,
1008};
1009
1010static int __init register_update_efi_random_seed(void)
1011{
1012	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1013		return 0;
1014	return register_reboot_notifier(&efi_random_seed_nb);
1015}
1016late_initcall(register_update_efi_random_seed);
1017#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * efi.c - EFI subsystem
   4 *
   5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
   6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
   7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
   8 *
   9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
  10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
   11 * The existence of /sys/firmware/efi may also be used by userspace to
  12 * determine that the system supports EFI.
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/kobject.h>
  18#include <linux/module.h>
  19#include <linux/init.h>
  20#include <linux/debugfs.h>
  21#include <linux/device.h>
  22#include <linux/efi.h>
  23#include <linux/of.h>
  24#include <linux/initrd.h>
  25#include <linux/io.h>
  26#include <linux/kexec.h>
  27#include <linux/platform_device.h>
  28#include <linux/random.h>
  29#include <linux/reboot.h>
  30#include <linux/slab.h>
  31#include <linux/acpi.h>
  32#include <linux/ucs2_string.h>
  33#include <linux/memblock.h>
  34#include <linux/security.h>
  35#include <linux/notifier.h>
  36
  37#include <asm/early_ioremap.h>
  38
  39struct efi __read_mostly efi = {
  40	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
  41	.acpi			= EFI_INVALID_TABLE_ADDR,
  42	.acpi20			= EFI_INVALID_TABLE_ADDR,
  43	.smbios			= EFI_INVALID_TABLE_ADDR,
  44	.smbios3		= EFI_INVALID_TABLE_ADDR,
  45	.esrt			= EFI_INVALID_TABLE_ADDR,
  46	.tpm_log		= EFI_INVALID_TABLE_ADDR,
  47	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
  48#ifdef CONFIG_LOAD_UEFI_KEYS
  49	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
  50#endif
  51#ifdef CONFIG_EFI_COCO_SECRET
  52	.coco_secret		= EFI_INVALID_TABLE_ADDR,
  53#endif
  54#ifdef CONFIG_UNACCEPTED_MEMORY
  55	.unaccepted		= EFI_INVALID_TABLE_ADDR,
  56#endif
  57};
  58EXPORT_SYMBOL(efi);
  59
  60unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
  61static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
  62static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
  63static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
  64
  65extern unsigned long screen_info_table;
  66
  67struct mm_struct efi_mm = {
  68	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
  69	.mm_users		= ATOMIC_INIT(2),
  70	.mm_count		= ATOMIC_INIT(1),
  71	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
  72	MMAP_LOCK_INITIALIZER(efi_mm)
  73	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
  74	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
  75	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
  76};
  77
  78struct workqueue_struct *efi_rts_wq;
  79
  80static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
  81static int __init setup_noefi(char *arg)
  82{
  83	disable_runtime = true;
  84	return 0;
  85}
  86early_param("noefi", setup_noefi);
  87
  88bool efi_runtime_disabled(void)
  89{
  90	return disable_runtime;
  91}
  92
  93bool __pure __efi_soft_reserve_enabled(void)
  94{
  95	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
  96}
  97
  98static int __init parse_efi_cmdline(char *str)
  99{
 100	if (!str) {
 101		pr_warn("need at least one option\n");
 102		return -EINVAL;
 103	}
 104
 105	if (parse_option_str(str, "debug"))
 106		set_bit(EFI_DBG, &efi.flags);
 107
 108	if (parse_option_str(str, "noruntime"))
 109		disable_runtime = true;
 110
 111	if (parse_option_str(str, "runtime"))
 112		disable_runtime = false;
 113
 114	if (parse_option_str(str, "nosoftreserve"))
 115		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
 116
 117	return 0;
 118}
 119early_param("efi", parse_efi_cmdline);
 120
 121struct kobject *efi_kobj;
 122
 123/*
 124 * Let's not leave out systab information that snuck into
 125 * the efivars driver
 126 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 127 * one value per file rule!
 128 */
 129static ssize_t systab_show(struct kobject *kobj,
 130			   struct kobj_attribute *attr, char *buf)
 131{
 132	char *str = buf;
 133
 134	if (!kobj || !buf)
 135		return -EINVAL;
 136
 137	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
 138		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
 139	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
 140		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
 141	/*
 142	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
 143	 * SMBIOS3 entry point shall be preferred, so we list it first to
 144	 * let applications stop parsing after the first match.
 145	 */
 146	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
 147		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
 148	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
 149		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
 150
 151	if (IS_ENABLED(CONFIG_X86))
 152		str = efi_systab_show_arch(str);
 153
 154	return str - buf;
 155}
 156
 157static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 158
 159static ssize_t fw_platform_size_show(struct kobject *kobj,
 160				     struct kobj_attribute *attr, char *buf)
 161{
 162	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
 163}
 164
 165extern __weak struct kobj_attribute efi_attr_fw_vendor;
 166extern __weak struct kobj_attribute efi_attr_runtime;
 167extern __weak struct kobj_attribute efi_attr_config_table;
 168static struct kobj_attribute efi_attr_fw_platform_size =
 169	__ATTR_RO(fw_platform_size);
 170
 171static struct attribute *efi_subsys_attrs[] = {
 172	&efi_attr_systab.attr,
 173	&efi_attr_fw_platform_size.attr,
 174	&efi_attr_fw_vendor.attr,
 175	&efi_attr_runtime.attr,
 176	&efi_attr_config_table.attr,
 177	NULL,
 178};
 179
 180umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
 181				   int n)
 182{
 183	return attr->mode;
 184}
 185
 186static const struct attribute_group efi_subsys_attr_group = {
 187	.attrs = efi_subsys_attrs,
 188	.is_visible = efi_attr_is_visible,
 189};
 190
 191struct blocking_notifier_head efivar_ops_nh;
 192EXPORT_SYMBOL_GPL(efivar_ops_nh);
 193
 194static struct efivars generic_efivars;
 195static struct efivar_operations generic_ops;
 196
 197static bool generic_ops_supported(void)
 198{
 199	unsigned long name_size;
 200	efi_status_t status;
 201	efi_char16_t name;
 202	efi_guid_t guid;
 203
 204	name_size = sizeof(name);
 205
 206	if (!efi.get_next_variable)
 207		return false;
 208	status = efi.get_next_variable(&name_size, &name, &guid);
 209	if (status == EFI_UNSUPPORTED)
 210		return false;
 211
 212	return true;
 213}
 214
 215static int generic_ops_register(void)
 216{
 217	if (!generic_ops_supported())
 218		return 0;
 219
 220	generic_ops.get_variable = efi.get_variable;
 221	generic_ops.get_next_variable = efi.get_next_variable;
 222	generic_ops.query_variable_store = efi_query_variable_store;
 223	generic_ops.query_variable_info = efi.query_variable_info;
 224
 225	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
 226		generic_ops.set_variable = efi.set_variable;
 227		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
 228	}
 229	return efivars_register(&generic_efivars, &generic_ops);
 230}
 231
 232static void generic_ops_unregister(void)
 233{
 234	if (!generic_ops.get_variable)
 235		return;
 236
 237	efivars_unregister(&generic_efivars);
 238}
 239
 240void efivars_generic_ops_register(void)
 241{
 242	generic_ops_register();
 243}
 244EXPORT_SYMBOL_GPL(efivars_generic_ops_register);
 245
 246void efivars_generic_ops_unregister(void)
 247{
 248	generic_ops_unregister();
 249}
 250EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
 251
 252#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
 253#define EFIVAR_SSDT_NAME_MAX	16UL
 254static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
 255static int __init efivar_ssdt_setup(char *str)
 256{
 257	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
 258
 259	if (ret)
 260		return ret;
 261
 262	if (strlen(str) < sizeof(efivar_ssdt))
 263		memcpy(efivar_ssdt, str, strlen(str));
 264	else
 265		pr_warn("efivar_ssdt: name too long: %s\n", str);
 266	return 1;
 267}
 268__setup("efivar_ssdt=", efivar_ssdt_setup);
 269
 270static __init int efivar_ssdt_load(void)
 271{
 272	unsigned long name_size = 256;
 273	efi_char16_t *name = NULL;
 274	efi_status_t status;
 275	efi_guid_t guid;
 276	int ret = 0;
 277
 278	if (!efivar_ssdt[0])
 279		return 0;
 280
 281	name = kzalloc(name_size, GFP_KERNEL);
 282	if (!name)
 283		return -ENOMEM;
 284
 285	for (;;) {
 286		char utf8_name[EFIVAR_SSDT_NAME_MAX];
 287		unsigned long data_size = 0;
 288		void *data;
 289		int limit;
 290
 291		status = efi.get_next_variable(&name_size, name, &guid);
 292		if (status == EFI_NOT_FOUND) {
 293			break;
 294		} else if (status == EFI_BUFFER_TOO_SMALL) {
 295			efi_char16_t *name_tmp =
 296				krealloc(name, name_size, GFP_KERNEL);
 297			if (!name_tmp) {
 298				ret = -ENOMEM;
 299				goto out;
 300			}
 301			name = name_tmp;
 302			continue;
 303		}
 304
 305		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
 306		ucs2_as_utf8(utf8_name, name, limit - 1);
 307		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
 308			continue;
 309
 310		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
 311
 312		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
 313		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
 314			ret = -EIO;
 315			goto out;
 316		}
 317
 318		data = kmalloc(data_size, GFP_KERNEL);
 319		if (!data) {
 320			ret = -ENOMEM;
 321			goto out;
 322		}
 323
 324		status = efi.get_variable(name, &guid, NULL, &data_size, data);
 325		if (status == EFI_SUCCESS) {
 326			acpi_status acpi_ret = acpi_load_table(data, NULL);
 327			if (ACPI_FAILURE(acpi_ret)) {
 328				pr_err("efivar_ssdt: failed to load table: %u\n",
 329				       acpi_ret);
 330			} else {
 331				/*
 332				 * The @data will be in use by ACPI engine,
 333				 * do not free it!
 334				 */
 335				continue;
 336			}
 337		} else {
 338			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
 339		}
 340		kfree(data);
 341	}
 342out:
 343	kfree(name);
 344	return ret;
 345}
 346#else
 347static inline int efivar_ssdt_load(void) { return 0; }
 348#endif
 349
 350#ifdef CONFIG_DEBUG_FS
 351
 352#define EFI_DEBUGFS_MAX_BLOBS 32
 353
 354static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
 355
 356static void __init efi_debugfs_init(void)
 357{
 358	struct dentry *efi_debugfs;
 359	efi_memory_desc_t *md;
 360	char name[32];
 361	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
 362	int i = 0;
 363
 364	efi_debugfs = debugfs_create_dir("efi", NULL);
 365	if (IS_ERR(efi_debugfs))
 366		return;
 367
 368	for_each_efi_memory_desc(md) {
 369		switch (md->type) {
 370		case EFI_BOOT_SERVICES_CODE:
 371			snprintf(name, sizeof(name), "boot_services_code%d",
 372				 type_count[md->type]++);
 373			break;
 374		case EFI_BOOT_SERVICES_DATA:
 375			snprintf(name, sizeof(name), "boot_services_data%d",
 376				 type_count[md->type]++);
 377			break;
 378		default:
 379			continue;
 380		}
 381
 382		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
  383			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
 384				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
 385			break;
 386		}
 387
 388		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
 389		debugfs_blob[i].data = memremap(md->phys_addr,
 390						debugfs_blob[i].size,
 391						MEMREMAP_WB);
 392		if (!debugfs_blob[i].data)
 393			continue;
 394
 395		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
 396		i++;
 397	}
 398}
 399#else
 400static inline void efi_debugfs_init(void) {}
 401#endif
 402
 403/*
 404 * We register the efi subsystem with the firmware subsystem and the
 405 * efivars subsystem with the efi subsystem, if the system was booted with
 406 * EFI.
 407 */
 408static int __init efisubsys_init(void)
 409{
 410	int error;
 411
 412	if (!efi_enabled(EFI_RUNTIME_SERVICES))
 413		efi.runtime_supported_mask = 0;
 414
 415	if (!efi_enabled(EFI_BOOT))
 416		return 0;
 417
 418	if (efi.runtime_supported_mask) {
 419		/*
 420		 * Since we process only one efi_runtime_service() at a time, an
 421		 * ordered workqueue (which creates only one execution context)
 422		 * should suffice for all our needs.
 423		 */
 424		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
 425		if (!efi_rts_wq) {
 426			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
 427			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 428			efi.runtime_supported_mask = 0;
 429			return 0;
 430		}
 431	}
 432
 433	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
 434		platform_device_register_simple("rtc-efi", 0, NULL, 0);
 435
 436	/* We register the efi directory at /sys/firmware/efi */
 437	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
 438	if (!efi_kobj) {
 439		pr_err("efi: Firmware registration failed.\n");
 440		error = -ENOMEM;
 441		goto err_destroy_wq;
 442	}
 443
 444	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
 445				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
 446		error = generic_ops_register();
 447		if (error)
 448			goto err_put;
 449		error = efivar_ssdt_load();
 450		if (error)
 451			pr_err("efi: failed to load SSDT, error %d.\n", error);
 452		platform_device_register_simple("efivars", 0, NULL, 0);
 453	}
 454
 455	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);
 456
 457	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
 458	if (error) {
 459		pr_err("efi: Sysfs attribute export failed with error %d.\n",
 460		       error);
 461		goto err_unregister;
 462	}
 463
 464	/* and the standard mountpoint for efivarfs */
 465	error = sysfs_create_mount_point(efi_kobj, "efivars");
 466	if (error) {
 467		pr_err("efivars: Subsystem registration failed.\n");
 468		goto err_remove_group;
 469	}
 470
 471	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
 472		efi_debugfs_init();
 473
 474#ifdef CONFIG_EFI_COCO_SECRET
 475	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
 476		platform_device_register_simple("efi_secret", 0, NULL, 0);
 477#endif
 478
 479	return 0;
 480
 481err_remove_group:
 482	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
 483err_unregister:
 484	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
 485				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
 486		generic_ops_unregister();
 487err_put:
 488	kobject_put(efi_kobj);
 489	efi_kobj = NULL;
 490err_destroy_wq:
 491	if (efi_rts_wq)
 492		destroy_workqueue(efi_rts_wq);
 493
 494	return error;
 495}
 496
 497subsys_initcall(efisubsys_init);
 498
 499void __init efi_find_mirror(void)
 500{
 501	efi_memory_desc_t *md;
 502	u64 mirror_size = 0, total_size = 0;
 503
 504	if (!efi_enabled(EFI_MEMMAP))
 505		return;
 506
 507	for_each_efi_memory_desc(md) {
 508		unsigned long long start = md->phys_addr;
 509		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 510
 511		total_size += size;
 512		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
 513			memblock_mark_mirror(start, size);
 514			mirror_size += size;
 515		}
 516	}
 517	if (mirror_size)
 518		pr_info("Memory: %lldM/%lldM mirrored memory\n",
 519			mirror_size>>20, total_size>>20);
 520}
 521
 522/*
 523 * Find the efi memory descriptor for a given physical address.  Given a
 524 * physical address, determine if it exists within an EFI Memory Map entry,
 525 * and if so, populate the supplied memory descriptor with the appropriate
 526 * data.
 527 */
 528int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 529{
 530	efi_memory_desc_t *md;
 531
 532	if (!efi_enabled(EFI_MEMMAP)) {
 533		pr_err_once("EFI_MEMMAP is not enabled.\n");
 534		return -EINVAL;
 535	}
 536
 537	if (!out_md) {
 538		pr_err_once("out_md is null.\n");
 539		return -EINVAL;
 540        }
 541
 542	for_each_efi_memory_desc(md) {
 543		u64 size;
 544		u64 end;
 545
 546		/* skip bogus entries (including empty ones) */
 547		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
 548		    (md->num_pages <= 0) ||
 549		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
 550			continue;
 551
 552		size = md->num_pages << EFI_PAGE_SHIFT;
 553		end = md->phys_addr + size;
 554		if (phys_addr >= md->phys_addr && phys_addr < end) {
 555			memcpy(out_md, md, sizeof(*out_md));
 556			return 0;
 557		}
 558	}
 559	return -ENOENT;
 560}
 561
 562extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 563	__weak __alias(__efi_mem_desc_lookup);
 564
 565/*
 566 * Calculate the highest address of an efi memory descriptor.
 567 */
 568u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
 569{
 570	u64 size = md->num_pages << EFI_PAGE_SHIFT;
 571	u64 end = md->phys_addr + size;
 572	return end;
 573}
 574
 575void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
 576
 577/**
 578 * efi_mem_reserve - Reserve an EFI memory region
 579 * @addr: Physical address to reserve
 580 * @size: Size of reservation
 581 *
 582 * Mark a region as reserved from general kernel allocation and
 583 * prevent it being released by efi_free_boot_services().
 584 *
  585 * This function should be called by drivers once they've parsed EFI
 586 * configuration tables to figure out where their data lives, e.g.
 587 * efi_esrt_init().
 588 */
 589void __init efi_mem_reserve(phys_addr_t addr, u64 size)
 590{
 591	/* efi_mem_reserve() does not work under Xen */
 592	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
 593		return;
 594
 595	if (!memblock_is_region_reserved(addr, size))
 596		memblock_reserve(addr, size);
 597
 598	/*
 599	 * Some architectures (x86) reserve all boot services ranges
 600	 * until efi_free_boot_services() because of buggy firmware
 601	 * implementations. This means the above memblock_reserve() is
 602	 * superfluous on x86 and instead what it needs to do is
 603	 * ensure the @start, @size is not freed.
 604	 */
 605	efi_arch_mem_reserve(addr, size);
 606}
 607
 608static const efi_config_table_type_t common_tables[] __initconst = {
 609	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
 610	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
 611	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
 612	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
 613	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
 614	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
 615	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
 616	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
 617	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
 618	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
 619	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
 620	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
 621	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
 622#ifdef CONFIG_EFI_RCI2_TABLE
 623	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
 624#endif
 625#ifdef CONFIG_LOAD_UEFI_KEYS
 626	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
 627#endif
 628#ifdef CONFIG_EFI_COCO_SECRET
 629	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
 630#endif
 631#ifdef CONFIG_UNACCEPTED_MEMORY
 632	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
 633#endif
 634#ifdef CONFIG_EFI_GENERIC_STUB
 635	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
 636#endif
 637	{},
 638};
 639
 640static __init int match_config_table(const efi_guid_t *guid,
 641				     unsigned long table,
 642				     const efi_config_table_type_t *table_types)
 643{
 644	int i;
 645
 646	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
 647		if (efi_guidcmp(*guid, table_types[i].guid))
 648			continue;
 649
 650		if (!efi_config_table_is_usable(guid, table)) {
 651			if (table_types[i].name[0])
 652				pr_cont("(%s=0x%lx unusable) ",
 653					table_types[i].name, table);
 654			return 1;
 655		}
 656
 657		*(table_types[i].ptr) = table;
 658		if (table_types[i].name[0])
 659			pr_cont("%s=0x%lx ", table_types[i].name, table);
 660		return 1;
 661	}
 662
 663	return 0;
 664}
 665
 666/**
 667 * reserve_unaccepted - Map and reserve unaccepted configuration table
 668 * @unaccepted: Pointer to unaccepted memory table
 669 *
 670 * memblock_add() makes sure that the table is mapped in direct mapping. During
 671 * normal boot it happens automatically because the table is allocated from
 672 * usable memory. But during crashkernel boot only memory specifically reserved
 673 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 674 * in crashkernel case.
 675 *
 676 * Align the range to the nearest page borders. Ranges smaller than page size
 677 * are not going to be mapped.
 678 *
 679 * memblock_reserve() makes sure that future allocations will not touch the
 680 * table.
 681 */
 682
 683static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
 684{
 685	phys_addr_t start, size;
 686
 687	start = PAGE_ALIGN_DOWN(efi.unaccepted);
 688	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
 689
 690	memblock_add(start, size);
 691	memblock_reserve(start, size);
 692}
 693
 694int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
 695				   int count,
 696				   const efi_config_table_type_t *arch_tables)
 697{
 698	const efi_config_table_64_t *tbl64 = (void *)config_tables;
 699	const efi_config_table_32_t *tbl32 = (void *)config_tables;
 700	const efi_guid_t *guid;
 701	unsigned long table;
 702	int i;
 703
 704	pr_info("");
 705	for (i = 0; i < count; i++) {
 706		if (!IS_ENABLED(CONFIG_X86)) {
 707			guid = &config_tables[i].guid;
 708			table = (unsigned long)config_tables[i].table;
 709		} else if (efi_enabled(EFI_64BIT)) {
 710			guid = &tbl64[i].guid;
 711			table = tbl64[i].table;
 712
 713			if (IS_ENABLED(CONFIG_X86_32) &&
 714			    tbl64[i].table > U32_MAX) {
 715				pr_cont("\n");
 716				pr_err("Table located above 4GB, disabling EFI.\n");
 717				return -EINVAL;
 718			}
 719		} else {
 720			guid = &tbl32[i].guid;
 721			table = tbl32[i].table;
 722		}
 723
 724		if (!match_config_table(guid, table, common_tables) && arch_tables)
 725			match_config_table(guid, table, arch_tables);
 726	}
 727	pr_cont("\n");
 728	set_bit(EFI_CONFIG_TABLES, &efi.flags);
 729
 730	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
 731		struct linux_efi_random_seed *seed;
 732		u32 size = 0;
 733
 734		seed = early_memremap(efi_rng_seed, sizeof(*seed));
 735		if (seed != NULL) {
 736			size = min_t(u32, seed->size, SZ_1K); // sanity check
 737			early_memunmap(seed, sizeof(*seed));
 738		} else {
 739			pr_err("Could not map UEFI random seed!\n");
 740		}
 741		if (size > 0) {
 742			seed = early_memremap(efi_rng_seed,
 743					      sizeof(*seed) + size);
 744			if (seed != NULL) {
 745				add_bootloader_randomness(seed->bits, size);
 746				memzero_explicit(seed->bits, size);
 747				early_memunmap(seed, sizeof(*seed) + size);
 748			} else {
 749				pr_err("Could not map UEFI random seed!\n");
 750			}
 751		}
 752	}
 753
 754	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
 755		efi_memattr_init();
 756
 757	efi_tpm_eventlog_init();
 758
 759	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
 760		unsigned long prsv = mem_reserve;
 761
 762		while (prsv) {
 763			struct linux_efi_memreserve *rsv;
 764			u8 *p;
 765
 766			/*
 767			 * Just map a full page: that is what we will get
 768			 * anyway, and it permits us to map the entire entry
 769			 * before knowing its size.
 770			 */
 771			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
 772					   PAGE_SIZE);
 773			if (p == NULL) {
 774				pr_err("Could not map UEFI memreserve entry!\n");
 775				return -ENOMEM;
 776			}
 777
 778			rsv = (void *)(p + prsv % PAGE_SIZE);
 779
 780			/* reserve the entry itself */
 781			memblock_reserve(prsv,
 782					 struct_size(rsv, entry, rsv->size));
 783
 784			for (i = 0; i < atomic_read(&rsv->count); i++) {
 785				memblock_reserve(rsv->entry[i].base,
 786						 rsv->entry[i].size);
 787			}
 788
 789			prsv = rsv->next;
 790			early_memunmap(p, PAGE_SIZE);
 791		}
 792	}
 793
 794	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
 795		efi_rt_properties_table_t *tbl;
 796
 797		tbl = early_memremap(rt_prop, sizeof(*tbl));
 798		if (tbl) {
 799			efi.runtime_supported_mask &= tbl->runtime_services_supported;
 800			early_memunmap(tbl, sizeof(*tbl));
 801		}
 802	}
 803
 804	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
 805	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
 806		struct linux_efi_initrd *tbl;
 807
 808		tbl = early_memremap(initrd, sizeof(*tbl));
 809		if (tbl) {
 810			phys_initrd_start = tbl->base;
 811			phys_initrd_size = tbl->size;
 812			early_memunmap(tbl, sizeof(*tbl));
 813		}
 814	}
 815
 816	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
 817	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
 818		struct efi_unaccepted_memory *unaccepted;
 819
 820		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
 821		if (unaccepted) {
 822
 823			if (unaccepted->version == 1) {
 824				reserve_unaccepted(unaccepted);
 825			} else {
 826				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
 827			}
 828
 829			early_memunmap(unaccepted, sizeof(*unaccepted));
 830		}
 831	}
 832
 833	return 0;
 834}
 835
 836int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
 837{
 838	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
 839		pr_err("System table signature incorrect!\n");
 840		return -EINVAL;
 841	}
 842
 843	return 0;
 844}
 845
 846static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
 847						size_t size)
 848{
 849	const efi_char16_t *ret;
 850
 851	ret = early_memremap_ro(fw_vendor, size);
 852	if (!ret)
 853		pr_err("Could not map the firmware vendor!\n");
 854	return ret;
 855}
 856
 857static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
 858{
 859	early_memunmap((void *)fw_vendor, size);
 860}
 861
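/*
 * The EFI revision packs the major version into the upper 16 bits and
 * (minor * 10 + sub-minor) into the lower 16 bits, so for example a
 * revision of (2 << 16 | 70) prints as "EFI v2.7" and (2 << 16 | 31)
 * prints as "EFI v2.3.1".
 */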
 862void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
 863				     unsigned long fw_vendor)
 864{
 865	char vendor[100] = "unknown";
 866	const efi_char16_t *c16;
 867	size_t i;
 868	u16 rev;
 869
 870	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
 871	if (c16) {
 872		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
 873			vendor[i] = c16[i];
 874		vendor[i] = '\0';
 875
 876		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
 877	}
 878
 879	rev = (u16)systab_hdr->revision;
 880	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
 881
 882	rev %= 10;
 883	if (rev)
 884		pr_cont(".%u", rev);
 885
 886	pr_cont(" by %s\n", vendor);
 887
 888	if (IS_ENABLED(CONFIG_X86_64) &&
 889	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
 890	    !strcmp(vendor, "Apple")) {
 891		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
 892		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
 893	}
 894}
 895
 896static __initdata char memory_type_name[][13] = {
 897	"Reserved",
 898	"Loader Code",
 899	"Loader Data",
 900	"Boot Code",
 901	"Boot Data",
 902	"Runtime Code",
 903	"Runtime Data",
 904	"Conventional",
 905	"Unusable",
 906	"ACPI Reclaim",
 907	"ACPI Mem NVS",
 908	"MMIO",
 909	"MMIO Port",
 910	"PAL Code",
 911	"Persistent",
 912	"Unaccepted",
 913};
 914
 915char * __init efi_md_typeattr_format(char *buf, size_t size,
 916				     const efi_memory_desc_t *md)
 917{
 918	char *pos;
 919	int type_len;
 920	u64 attr;
 921
 922	pos = buf;
 923	if (md->type >= ARRAY_SIZE(memory_type_name))
 924		type_len = snprintf(pos, size, "[type=%u", md->type);
 925	else
 926		type_len = snprintf(pos, size, "[%-*s",
 927				    (int)(sizeof(memory_type_name[0]) - 1),
 928				    memory_type_name[md->type]);
 929	if (type_len >= size)
 930		return buf;
 931
 932	pos += type_len;
 933	size -= type_len;
 934
 935	attr = md->attribute;
 936	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
 937		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
 938		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
 939		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
 940		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
 941		     EFI_MEMORY_RUNTIME))
 942		snprintf(pos, size, "|attr=0x%016llx]",
 943			 (unsigned long long)attr);
 944	else
 945		snprintf(pos, size,
 946			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
 947			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
 948			 attr & EFI_MEMORY_HOT_PLUGGABLE	? "HP"  : "",
 949			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
 950			 attr & EFI_MEMORY_CPU_CRYPTO		? "CC"  : "",
 951			 attr & EFI_MEMORY_SP			? "SP"  : "",
 952			 attr & EFI_MEMORY_NV			? "NV"  : "",
 953			 attr & EFI_MEMORY_XP			? "XP"  : "",
 954			 attr & EFI_MEMORY_RP			? "RP"  : "",
 955			 attr & EFI_MEMORY_WP			? "WP"  : "",
 956			 attr & EFI_MEMORY_RO			? "RO"  : "",
 957			 attr & EFI_MEMORY_UCE			? "UCE" : "",
 958			 attr & EFI_MEMORY_WB			? "WB"  : "",
 959			 attr & EFI_MEMORY_WT			? "WT"  : "",
 960			 attr & EFI_MEMORY_WC			? "WC"  : "",
 961			 attr & EFI_MEMORY_UC			? "UC"  : "");
 962	return buf;
 963}
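/*
 * Example (illustrative): a conventional-memory descriptor carrying the
 * usual UC|WC|WT|WB cacheability attributes formats roughly as
 *
 *	[Conventional|   |  |  |  |  |  |  |  |  |   |WB|WT|WC|UC]
 *
 * while unrecognised attribute bits make the function fall back to the
 * raw "|attr=0x%016llx]" form instead of the per-flag columns.
 */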
 964
 965/*
 966 * efi_mem_attributes - lookup memmap attributes for physical address
 967 * @phys_addr: the physical address to lookup
 968 *
 969 * Search in the EFI memory map for the region covering
 970 * @phys_addr. Returns the EFI memory attributes if the region
 971 * was found in the memory map, 0 otherwise.
 972 */
 973u64 efi_mem_attributes(unsigned long phys_addr)
 974{
 975	efi_memory_desc_t *md;
 976
 977	if (!efi_enabled(EFI_MEMMAP))
 978		return 0;
 979
 980	for_each_efi_memory_desc(md) {
 981		if ((md->phys_addr <= phys_addr) &&
 982		    (phys_addr < (md->phys_addr +
 983		    (md->num_pages << EFI_PAGE_SHIFT))))
 984			return md->attribute;
 985	}
 986	return 0;
 987}
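/*
 * Usage sketch (hypothetical caller): mapping code can use the returned
 * attribute mask to choose a caching type, e.g.
 *
 *	u64 attr = efi_mem_attributes(paddr);
 *	bool cacheable = attr & EFI_MEMORY_WB;
 */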
 988
 989/*
 990 * efi_mem_type - lookup memmap type for physical address
 991 * @phys_addr: the physical address to lookup
 992 *
 993 * Search in the EFI memory map for the region covering @phys_addr.
 994 * Returns the EFI memory type if the region was found in the memory
 995 * map, -ENOTSUPP if no EFI memory map is available, -EINVAL otherwise.
 996 */
 997int efi_mem_type(unsigned long phys_addr)
 998{
 999	const efi_memory_desc_t *md;
1000
1001	if (!efi_enabled(EFI_MEMMAP))
1002		return -ENOTSUPP;
1003
1004	for_each_efi_memory_desc(md) {
1005		if ((md->phys_addr <= phys_addr) &&
1006		    (phys_addr < (md->phys_addr +
1007				  (md->num_pages << EFI_PAGE_SHIFT))))
1008			return md->type;
1009	}
1010	return -EINVAL;
1011}
1012
1013int efi_status_to_err(efi_status_t status)
1014{
1015	int err;
1016
1017	switch (status) {
1018	case EFI_SUCCESS:
1019		err = 0;
1020		break;
1021	case EFI_INVALID_PARAMETER:
1022		err = -EINVAL;
1023		break;
1024	case EFI_OUT_OF_RESOURCES:
1025		err = -ENOSPC;
1026		break;
1027	case EFI_DEVICE_ERROR:
1028		err = -EIO;
1029		break;
1030	case EFI_WRITE_PROTECTED:
1031		err = -EROFS;
1032		break;
1033	case EFI_SECURITY_VIOLATION:
1034		err = -EACCES;
1035		break;
1036	case EFI_NOT_FOUND:
1037		err = -ENOENT;
1038		break;
1039	case EFI_ABORTED:
1040		err = -EINTR;
1041		break;
1042	default:
1043		err = -EINVAL;
1044	}
1045
1046	return err;
1047}
1048EXPORT_SYMBOL_GPL(efi_status_to_err);
1049
1050static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
1051static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1052
1053static int __init efi_memreserve_map_root(void)
1054{
1055	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1056		return -ENODEV;
1057
1058	efi_memreserve_root = memremap(mem_reserve,
1059				       sizeof(*efi_memreserve_root),
1060				       MEMREMAP_WB);
1061	if (WARN_ON_ONCE(!efi_memreserve_root))
1062		return -ENOMEM;
1063	return 0;
1064}
1065
1066static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1067{
1068	struct resource *res, *parent;
1069	int ret;
1070
1071	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1072	if (!res)
1073		return -ENOMEM;
1074
1075	res->name	= "reserved";
1076	res->flags	= IORESOURCE_MEM;
1077	res->start	= addr;
1078	res->end	= addr + size - 1;
1079
1080	/* we expect a conflict with a 'System RAM' region */
1081	parent = request_resource_conflict(&iomem_resource, res);
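	/*
	 * No conflict means the resource was inserted directly under
	 * iomem_resource; otherwise add it as a child of the conflicting
	 * region (normally 'System RAM') so it nests in /proc/iomem.
	 */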
1082	ret = parent ? request_resource(parent, res) : 0;
1083
1084	/*
1085	 * Given that efi_mem_reserve_iomem() can be called at any
1086	 * time, only call memblock_reserve() if the architecture
1087	 * keeps the infrastructure around.
1088	 */
1089	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1090		memblock_reserve(addr, size);
1091
1092	return ret;
1093}
1094
1095int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1096{
1097	struct linux_efi_memreserve *rsv;
1098	unsigned long prsv;
1099	int rc, index;
1100
1101	if (efi_memreserve_root == (void *)ULONG_MAX)
1102		return -ENODEV;
1103
1104	if (!efi_memreserve_root) {
1105		rc = efi_memreserve_map_root();
1106		if (rc)
1107			return rc;
1108	}
1109
1110	/* first try to find a slot in an existing linked list entry */
1111	for (prsv = efi_memreserve_root->next; prsv; ) {
1112		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1113		if (!rsv)
1114			return -ENOMEM;
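		/*
		 * atomic_fetch_add_unless() claims the next free slot without
		 * locking: it only bumps 'count' while it is still below
		 * 'size', and otherwise returns 'size' so we move on to the
		 * next node in the list.
		 */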
1115		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1116		if (index < rsv->size) {
1117			rsv->entry[index].base = addr;
1118			rsv->entry[index].size = size;
1119
1120			memunmap(rsv);
1121			return efi_mem_reserve_iomem(addr, size);
1122		}
1123		prsv = rsv->next;
1124		memunmap(rsv);
1125	}
1126
1127	/* no slot found - allocate a new linked list entry */
1128	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1129	if (!rsv)
1130		return -ENOMEM;
1131
1132	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1133	if (rc) {
1134		free_page((unsigned long)rsv);
1135		return rc;
1136	}
1137
1138	/*
1139	 * The memremap() call above assumes that a linux_efi_memreserve entry
1140	 * never crosses a page boundary, so let's ensure that this remains true
1141	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1142	 * using SZ_4K explicitly in the size calculation below.
1143	 */
1144	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1145	atomic_set(&rsv->count, 1);
1146	rsv->entry[0].base = addr;
1147	rsv->entry[0].size = size;
1148
1149	spin_lock(&efi_mem_reserve_persistent_lock);
1150	rsv->next = efi_memreserve_root->next;
1151	efi_memreserve_root->next = __pa(rsv);
1152	spin_unlock(&efi_mem_reserve_persistent_lock);
1153
1154	return efi_mem_reserve_iomem(addr, size);
1155}
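/*
 * Usage sketch (hypothetical caller): code that hands a buffer to firmware
 * and needs it preserved across kexec could record it with
 *
 *	efi_mem_reserve_persistent(virt_to_phys(buf), buf_size);
 *
 * after which the region appears in the memreserve list that the next
 * kernel walks (and re-reserves) earlier in this file.
 */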
1156
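/*
 * Map the memreserve root once at early-init time; if that fails, set the
 * pointer to the ULONG_MAX sentinel so later efi_mem_reserve_persistent()
 * calls bail out with -ENODEV instead of retrying the mapping.
 */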
1157static int __init efi_memreserve_root_init(void)
1158{
1159	if (efi_memreserve_root)
1160		return 0;
1161	if (efi_memreserve_map_root())
1162		efi_memreserve_root = (void *)ULONG_MAX;
1163	return 0;
1164}
1165early_initcall(efi_memreserve_root_init);
1166
1167#ifdef CONFIG_KEXEC
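/*
 * On the way into kexec, refill the seed table with fresh entropy so the
 * next kernel does not inherit the bytes this kernel already consumed
 * (and wiped) at boot.
 */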
1168static int update_efi_random_seed(struct notifier_block *nb,
1169				  unsigned long code, void *unused)
1170{
1171	struct linux_efi_random_seed *seed;
1172	u32 size = 0;
1173
1174	if (!kexec_in_progress)
1175		return NOTIFY_DONE;
1176
1177	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1178	if (seed != NULL) {
1179		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1180		memunmap(seed);
1181	} else {
1182		pr_err("Could not map UEFI random seed!\n");
1183	}
1184	if (size > 0) {
1185		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1186				MEMREMAP_WB);
1187		if (seed != NULL) {
1188			seed->size = size;
1189			get_random_bytes(seed->bits, seed->size);
1190			memunmap(seed);
1191		} else {
1192			pr_err("Could not map UEFI random seed!\n");
1193		}
1194	}
1195	return NOTIFY_DONE;
1196}
1197
1198static struct notifier_block efi_random_seed_nb = {
1199	.notifier_call = update_efi_random_seed,
1200};
1201
1202static int __init register_update_efi_random_seed(void)
1203{
1204	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1205		return 0;
1206	return register_reboot_notifier(&efi_random_seed_nb);
1207}
1208late_initcall(register_update_efi_random_seed);
1209#endif