v6.2: arch/x86/include/asm/efi.h
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_X86_EFI_H
  3#define _ASM_X86_EFI_H
  4
  5#include <asm/fpu/api.h>
  6#include <asm/processor-flags.h>
  7#include <asm/tlb.h>
  8#include <asm/nospec-branch.h>
  9#include <asm/mmu_context.h>
 10#include <asm/ibt.h>
 11#include <linux/build_bug.h>
 12#include <linux/kernel.h>
 13#include <linux/pgtable.h>
 14
 15extern unsigned long efi_fw_vendor, efi_config_table;
 16extern unsigned long efi_mixed_mode_stack_pa;
 17
 18/*
 19 * We map the EFI regions needed for runtime services non-contiguously,
 20 * with preserved alignment on virtual addresses starting from -4G down
 21 * for a total max space of 64G. This way, we provide for stable runtime
 22 * services addresses across kernels so that a kexec'd kernel can still
 23 * use them.
 24 *
 25 * This is the main reason why we're doing stable VA mappings for RT
 26 * services.
 27 */
 28
 29#define EFI32_LOADER_SIGNATURE	"EL32"
 30#define EFI64_LOADER_SIGNATURE	"EL64"
 31
 32#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF
 33
 34/*
 35 * The EFI services are called through variadic functions in many cases. These
 36 * functions are implemented in assembler and support only a fixed number of
 37 * arguments. The macros below allow us to check at build time that we don't
 38 * try to call them with too many arguments.
 39 *
 40 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 41 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 42 * impossible to calculate the exact number of arguments beyond some
 43 * pre-defined limit. The maximum number of arguments currently supported by
 44 * any of the thunks is 7, so this is good enough for now and can be extended
 45 * in the obvious way if we ever need more.
 46 */
 47
 48#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
 49#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
 50	__efi_arg_sentinel(9), __efi_arg_sentinel(8),		\
 51	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
 52	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
 53	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
 54	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
 55#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, ...)	\
 56	__take_second_arg(n,					\
 57		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 10; }))
 58#define __efi_arg_sentinel(n) , n
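A worked expansion may help here (an editorial addition, not part of the header); the intermediate steps are shown only approximately:

/*
 * __efi_nargs(a, b, c)
 *   -> __efi_nargs__(0, a, b, c,
 *                    __efi_arg_sentinel(9), ..., __efi_arg_sentinel(0))
 *   -> __take_second_arg(, 3, ({ BUILD_BUG_ON_MSG(...); 10; }))
 *   -> 3
 *
 * Each __efi_arg_sentinel(n) expands to ", n", so with three arguments the
 * parameter named 'n' in __efi_nargs__() picks up the sentinel for 3 (the
 * leading empty argument to __take_second_arg() comes from that ", 3").
 * Only when enough real arguments are passed to push one of them into the
 * 'n' slot is the BUILD_BUG_ON_MSG() expression selected instead.
 */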
 59
 60/*
 61 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 62 * represents more than n arguments.
 63 */
 64
 65#define __efi_nargs_check(f, n, ...)					\
 66	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
 67#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
 68#define __efi_nargs_check__(f, p, n) ({					\
 69	BUILD_BUG_ON_MSG(						\
 70		(p) > (n),						\
 71		#f " called with too many arguments (" #p ">" #n ")");	\
 72})
 73
 74static inline void efi_fpu_begin(void)
 75{
 76	/*
 77	 * The UEFI calling convention (UEFI spec 2.3.2 and 2.3.4) requires
 78	 * that FCW and MXCSR (64-bit) must be initialized prior to calling
 79	 * UEFI code.  (Oddly the spec does not require that the FPU stack
 80	 * be empty.)
 81	 */
 82	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
 83}
 84
 85static inline void efi_fpu_end(void)
 86{
 87	kernel_fpu_end();
 88}
 89
 90#ifdef CONFIG_X86_32
 91#define arch_efi_call_virt_setup()					\
 92({									\
 93	efi_fpu_begin();						\
 94	firmware_restrict_branch_speculation_start();			\
 95})
 96
 97#define arch_efi_call_virt_teardown()					\
 98({									\
 99	firmware_restrict_branch_speculation_end();			\
100	efi_fpu_end();							\
101})
102
103#else /* !CONFIG_X86_32 */
104
105#define EFI_LOADER_SIGNATURE	"EL64"
106
107extern asmlinkage u64 __efi_call(void *fp, ...);
108
109#define efi_call(...) ({						\
110	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
111	__efi_call(__VA_ARGS__);					\
112})
113
114#define arch_efi_call_virt_setup()					\
115({									\
116	efi_sync_low_kernel_mappings();					\
117	efi_fpu_begin();						\
118	firmware_restrict_branch_speculation_start();			\
119	efi_enter_mm();							\
120})
121
122#undef arch_efi_call_virt
123#define arch_efi_call_virt(p, f, args...) ({				\
124	u64 ret, ibt = ibt_save();					\
125	ret = efi_call((void *)p->f, args);				\
126	ibt_restore(ibt);						\
127	ret;								\
128})
129
130#define arch_efi_call_virt_teardown()					\
131({									\
132	efi_leave_mm();							\
133	firmware_restrict_branch_speculation_end();			\
134	efi_fpu_end();							\
135})
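For orientation, an editorial sketch (not part of the header) of how these hooks are expected to bracket a runtime-service call; the wrapper below is a simplified stand-in for what the generic EFI runtime-wrapper code does, and it assumes the efi_*_t types from <linux/efi.h>:

/* Simplified, hypothetical wrapper shown for illustration only. */
static inline efi_status_t efi_get_time_example(efi_runtime_services_t *rt,
						efi_time_t *tm,
						efi_time_cap_t *tc)
{
	efi_status_t status;

	arch_efi_call_virt_setup();		/* FPU, speculation, EFI mm */
	status = arch_efi_call_virt(rt, get_time, tm, tc);
	arch_efi_call_virt_teardown();		/* undo the above */

	return status;
}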
136
137#ifdef CONFIG_KASAN
138/*
139 * CONFIG_KASAN may redefine memset to __memset.  The __memset function is present
140 * only in the kernel binary.  Since the EFI stub is linked into a separate binary,
141 * it doesn't have __memset().  So we should use the standard memset from
142 * arch/x86/boot/compressed/string.c.  The same applies to memcpy and memmove.
143 */
144#undef memcpy
145#undef memset
146#undef memmove
147#endif
148
149#endif /* CONFIG_X86_32 */
150
151extern int __init efi_memblock_x86_reserve_range(void);
152extern void __init efi_print_memmap(void);
153extern void __init efi_map_region(efi_memory_desc_t *md);
154extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
155extern void efi_sync_low_kernel_mappings(void);
156extern int __init efi_alloc_page_tables(void);
157extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
158extern void __init efi_runtime_update_mappings(void);
159extern void __init efi_dump_pagetable(void);
160extern void __init efi_apply_memmap_quirks(void);
161extern int __init efi_reuse_config(u64 tables, int nr_tables);
162extern void efi_delete_dummy_variable(void);
163extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
164extern void efi_free_boot_services(void);
165
166void efi_enter_mm(void);
167void efi_leave_mm(void);
168
169/* kexec external ABI */
170struct efi_setup_data {
171	u64 fw_vendor;
172	u64 __unused;
173	u64 tables;
174	u64 smbios;
175	u64 reserved[8];
176};
177
178extern u64 efi_setup;
179
180#ifdef CONFIG_EFI
181extern u64 __efi64_thunk(u32, ...);
182
183#define efi64_thunk(...) ({						\
184	u64 __pad[3]; /* must have space for 3 args on the stack */	\
185	__efi_nargs_check(efi64_thunk, 9, __VA_ARGS__);			\
186	__efi64_thunk(__VA_ARGS__, __pad);				\
187})
188
189static inline bool efi_is_mixed(void)
190{
191	if (!IS_ENABLED(CONFIG_EFI_MIXED))
192		return false;
193	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
194}
195
196static inline bool efi_runtime_supported(void)
197{
198	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
199		return true;
200
201	return IS_ENABLED(CONFIG_EFI_MIXED);
202}
203
204extern void parse_efi_setup(u64 phys_addr, u32 data_len);
205
206extern void efi_thunk_runtime_setup(void);
207efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
208					 unsigned long descriptor_size,
209					 u32 descriptor_version,
210					 efi_memory_desc_t *virtual_map,
211					 unsigned long systab_phys);
212
213/* arch specific definitions used by the stub code */
214
215#ifdef CONFIG_EFI_MIXED
216
217#define ARCH_HAS_EFISTUB_WRAPPERS
218
219static inline bool efi_is_64bit(void)
220{
221	extern const bool efi_is64;
222
223	return efi_is64;
224}
225
226static inline bool efi_is_native(void)
227{
228	return efi_is_64bit();
229}
230
231#define efi_table_attr(inst, attr)					\
232	(efi_is_native() ? (inst)->attr					\
233			 : efi_mixed_table_attr((inst), attr))
234
235#define efi_mixed_table_attr(inst, attr)				\
236	(__typeof__(inst->attr))					\
237		_Generic(inst->mixed_mode.attr,				\
238		u32:		(unsigned long)(inst->mixed_mode.attr),	\
239		default:	(inst->mixed_mode.attr))
240
241/*
242 * The following macros translate arguments, where necessary, from native to
243 * mixed mode. The use cases are initializing the upper 32 bits of output
244 * parameters, and splitting a 64-bit argument into two 32-bit halves where
245 * the 32-bit method requires it, so that it can be thunked properly.
246 *
247 * As examples, the AllocatePool boot service returns the address of the
248 * allocation, but it will not set the high 32 bits of the address. To ensure
249 * that the full 64-bit address is initialized, we zero-init the address before
250 * calling the thunk.
251 *
252 * The FreePages boot service takes a 64-bit physical address even in 32-bit
253 * mode. For the thunk to work correctly, a native 64-bit call of
254 * 	free_pages(addr, size)
255 * must be translated to
256 * 	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
257 * so that the two 32-bit halves of addr get pushed onto the stack separately.
258 */
259
260static inline void *efi64_zero_upper(void *p)
261{
262	((u32 *)p)[1] = 0;
263	return p;
264}
265
266static inline u32 efi64_convert_status(efi_status_t status)
267{
268	return (u32)(status | (u64)status >> 32);
269}
270
271#define __efi64_split(val)		(val) & U32_MAX, (u64)(val) >> 32
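Two worked values (editorial notes, not part of the header) may make the helpers above more concrete:

/*
 * __efi64_split(0x123456789ULL)
 *   -> 0x23456789, 0x1              (low 32 bits first, then the high 32)
 *
 * efi64_convert_status(EFI_INVALID_PARAMETER)
 *   -> (u32)(0x8000000000000002 | 0x0000000080000000) = 0x80000002
 * i.e. the 64-bit error bit (bit 63) is folded down into bit 31 of the
 * 32-bit status that mixed-mode firmware expects.
 */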
272
273#define __efi64_argmap_free_pages(addr, size)				\
274	((addr), 0, (size))
275
276#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
277	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))
278
279#define __efi64_argmap_allocate_pool(type, size, buffer)		\
280	((type), (size), efi64_zero_upper(buffer))
281
282#define __efi64_argmap_create_event(type, tpl, f, c, event)		\
283	((type), (tpl), (f), (c), efi64_zero_upper(event))
284
285#define __efi64_argmap_set_timer(event, type, time)			\
286	((event), (type), lower_32_bits(time), upper_32_bits(time))
287
288#define __efi64_argmap_wait_for_event(num, event, index)		\
289	((num), (event), efi64_zero_upper(index))
290
291#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
292	((handle), (protocol), efi64_zero_upper(interface))
293
294#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
295	((protocol), (reg), efi64_zero_upper(interface))
296
297#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
298	((protocol), (path), efi64_zero_upper(handle))
299
300#define __efi64_argmap_exit(handle, status, size, data)			\
301	((handle), efi64_convert_status(status), (size), (data))
302
303/* PCI I/O */
304#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
305	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
306	 efi64_zero_upper(dev), efi64_zero_upper(func))
307
308/* LoadFile */
309#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
310	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))
311
312/* Graphics Output Protocol */
313#define __efi64_argmap_query_mode(gop, mode, size, info)		\
314	((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))
315
316/* TCG2 protocol */
317#define __efi64_argmap_hash_log_extend_event(prot, fl, addr, size, ev)	\
318	((prot), (fl), 0ULL, (u64)(addr), 0ULL, (u64)(size), 0ULL, ev)
319
320/* DXE services */
321#define __efi64_argmap_get_memory_space_descriptor(phys, desc) \
322	(__efi64_split(phys), (desc))
323
324#define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \
325	(__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
326
327/* file protocol */
328#define __efi64_argmap_open(prot, newh, fname, mode, attr) \
329	((prot), efi64_zero_upper(newh), (fname), __efi64_split(mode), \
330	 __efi64_split(attr))
331
332#define __efi64_argmap_set_position(pos) (__efi64_split(pos))
333
334/* file system protocol */
335#define __efi64_argmap_open_volume(prot, file) \
336	((prot), efi64_zero_upper(file))
337
338/*
339 * The macros below handle the plumbing for the argument mapping. To add a
340 * mapping for a specific EFI method, simply define a macro
341 * __efi64_argmap_<method name>, following the examples above.
342 */
343
344#define __efi64_thunk_map(inst, func, ...)				\
345	efi64_thunk(inst->mixed_mode.func,				\
346		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
347			       (__VA_ARGS__)))
348
349#define __efi64_argmap(mapped, args)					\
350	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
351#define __efi64_argmap__0(mapped, args) __efi_eval mapped
352#define __efi64_argmap__1(mapped, args) __efi_eval args
353
354#define __efi_eat(...)
355#define __efi_eval(...) __VA_ARGS__
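The dispatch above is terse, so here is an editorial sketch (approximate) of the two cases it distinguishes:

/*
 * If an __efi64_argmap_<func>() macro exists (e.g. free_pages above),
 * 'mapped' has already been rewritten into a parenthesised argument list,
 * __efi_eat mapped swallows it entirely, __efi_nargs() therefore sees zero
 * arguments, and __efi64_argmap__0() emits the remapped arguments:
 *
 *   __efi64_argmap(((addr), 0, (size)), (addr, size))  ->  (addr), 0, (size)
 *
 * If no such macro exists, 'mapped' is still the unexpanded
 * __efi64_argmap_<func>(...) token sequence, __efi_eat is not invoked on it,
 * __efi_nargs() sees at least one argument, and __efi64_argmap__1() falls
 * back to the original argument list unchanged.
 */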
356
357static inline efi_status_t __efi64_widen_efi_status(u64 status)
358{
359	/* use rotate to move the value of bit #31 into position #63 */
360	return ror64(rol32(status, 1), 1);
361}
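A worked value for the widening helper (editorial note):

/*
 * A 32-bit EFI_UNSUPPORTED (0x80000003) widens to its 64-bit encoding:
 *
 *   rol32(0x80000003, 1)          = 0x00000007   (error bit 31 -> bit 0)
 *   ror64(0x0000000000000007, 1)  = 0x8000000000000003
 */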
362
363/* The macro below handles dispatching via the thunk if needed */
364
365#define efi_fn_call(inst, func, ...)					\
366	(efi_is_native() ? (inst)->func(__VA_ARGS__)			\
367			 : efi_mixed_call((inst), func, ##__VA_ARGS__))
368
369#define efi_mixed_call(inst, func, ...)					\
370	_Generic(inst->func(__VA_ARGS__),				\
371	efi_status_t:							\
372		__efi64_widen_efi_status(				\
373			__efi64_thunk_map(inst, func, ##__VA_ARGS__)),	\
374	u64: ({ BUILD_BUG(); ULONG_MAX; }),				\
375	default:							\
376		(__typeof__(inst->func(__VA_ARGS__)))			\
377			__efi64_thunk_map(inst, func, ##__VA_ARGS__))
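An editorial sketch of how a mixed-mode dispatch through the macro above unwinds, with bt standing in for a pointer to the boot services table (the name is illustrative only):

/*
 * efi_fn_call(bt, allocate_pool, EFI_LOADER_DATA, size, &buf)
 *   native:      bt->allocate_pool(EFI_LOADER_DATA, size, &buf)
 *   mixed mode:  __efi64_widen_efi_status(
 *                    efi64_thunk(bt->mixed_mode.allocate_pool,
 *                                EFI_LOADER_DATA, size,
 *                                efi64_zero_upper(&buf)))
 *
 * The argument rewrite comes from __efi64_argmap_allocate_pool() above, and
 * the _Generic() on the return type selects the status-widening path because
 * allocate_pool() returns efi_status_t.
 */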
378
379#else /* CONFIG_EFI_MIXED */
380
381static inline bool efi_is_64bit(void)
382{
383	return IS_ENABLED(CONFIG_X86_64);
384}
385
386#endif /* CONFIG_EFI_MIXED */
387
388extern bool efi_reboot_required(void);
389extern bool efi_is_table_address(unsigned long phys_addr);
390
391extern void efi_reserve_boot_services(void);
392#else
393static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
394static inline bool efi_reboot_required(void)
395{
396	return false;
397}
398static inline bool efi_is_table_address(unsigned long phys_addr)
399{
400	return false;
401}
402static inline void efi_reserve_boot_services(void)
403{
404}
405#endif /* CONFIG_EFI */
406
407#ifdef CONFIG_EFI_FAKE_MEMMAP
408extern void __init efi_fake_memmap_early(void);
409extern void __init efi_fake_memmap(void);
410#else
411static inline void efi_fake_memmap_early(void)
412{
413}
414
415static inline void efi_fake_memmap(void)
416{
417}
418#endif
419
420extern int __init efi_memmap_alloc(unsigned int num_entries,
421				   struct efi_memory_map_data *data);
422extern void __efi_memmap_free(u64 phys, unsigned long size,
423			      unsigned long flags);
424#define __efi_memmap_free __efi_memmap_free
425
426extern int __init efi_memmap_install(struct efi_memory_map_data *data);
427extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
428					 struct range *range);
429extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
430				     void *buf, struct efi_mem_range *mem);
431
432#define arch_ima_efi_boot_mode	\
433	({ extern struct boot_params boot_params; boot_params.secure_boot; })
434
435#ifdef CONFIG_EFI_RUNTIME_MAP
436int efi_get_runtime_map_size(void);
437int efi_get_runtime_map_desc_size(void);
438int efi_runtime_map_copy(void *buf, size_t bufsz);
439#else
440static inline int efi_get_runtime_map_size(void)
441{
442	return 0;
443}
444
445static inline int efi_get_runtime_map_desc_size(void)
446{
447	return 0;
448}
449
450static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
451{
452	return 0;
453}
454
455#endif
456
457#endif /* _ASM_X86_EFI_H */
v4.6: arch/x86/include/asm/efi.h
 
  1#ifndef _ASM_X86_EFI_H
  2#define _ASM_X86_EFI_H
  3
  4#include <asm/fpu/api.h>
  5#include <asm/pgtable.h>
  6#include <asm/tlb.h>
  7
  8/*
  9 * We map the EFI regions needed for runtime services non-contiguously,
 10 * with preserved alignment on virtual addresses starting from -4G down
 11 * for a total max space of 64G. This way, we provide for stable runtime
 12 * services addresses across kernels so that a kexec'd kernel can still
 13 * use them.
 14 *
 15 * This is the main reason why we're doing stable VA mappings for RT
 16 * services.
 17 *
 18 * This flag is used in conjunction with a chicken bit called
 19 * "efi=old_map" which can be used as a fallback to the old runtime
 20 * services mapping method in case there's some b0rkage with a
 21 * particular EFI implementation (haha, it is hard to hold up the
 22 * sarcasm here...).
 23 */
 24#define EFI_OLD_MEMMAP		EFI_ARCH_1
 25
 26#define EFI32_LOADER_SIGNATURE	"EL32"
 27#define EFI64_LOADER_SIGNATURE	"EL64"
 28
 29#define MAX_CMDLINE_ADDRESS	UINT_MAX
 30
 31#ifdef CONFIG_X86_32
 32
 33
 34extern unsigned long asmlinkage efi_call_phys(void *, ...);
 35
 36/*
 37 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 38 */
 39
 40/* Use this macro if your virtual call returns a non-void value */
 41#define efi_call_virt(f, args...) \
 42({									\
 43	efi_status_t __s;						\
 44	kernel_fpu_begin();						\
 45	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
 46		efi.systab->runtime->f)(args);				\
 47	kernel_fpu_end();						\
 48	__s;								\
 49})
 50
 51/* Use this macro if your virtual call does not return any value */
 52#define __efi_call_virt(f, args...) \
 53({									\
 54	kernel_fpu_begin();						\
 55	((efi_##f##_t __attribute__((regparm(0)))*)			\
 56		efi.systab->runtime->f)(args);				\
 57	kernel_fpu_end();						\
 58})
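An editorial usage sketch (approximate, not from the file): callers name the runtime-service member directly and the macro resolves it through efi.systab->runtime, roughly

/*
 *   efi_status_t status = efi_call_virt(get_time, tm, tc);
 *   __efi_call_virt(reset_system, reset_type, status, data_size, data);
 */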
 59
 60#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
 61
 62#else /* !CONFIG_X86_32 */
 63
 64#define EFI_LOADER_SIGNATURE	"EL64"
 65
 66extern u64 asmlinkage efi_call(void *fp, ...);
 67
 68#define efi_call_phys(f, args...)		efi_call((f), args)
 69
 70/*
 71 * Scratch space used for switching the pagetable in the EFI stub
 72 */
 73struct efi_scratch {
 74	u64	r15;
 75	u64	prev_cr3;
 76	pgd_t	*efi_pgt;
 77	bool	use_pgd;
 78	u64	phys_stack;
 79} __packed;
 80
 81#define efi_call_virt(f, ...)						\
 82({									\
 83	efi_status_t __s;						\
 84									\
 85	efi_sync_low_kernel_mappings();					\
 86	preempt_disable();						\
 87	__kernel_fpu_begin();						\
 88									\
 89	if (efi_scratch.use_pgd) {					\
 90		efi_scratch.prev_cr3 = read_cr3();			\
 91		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
 92		__flush_tlb_all();					\
 93	}								\
 94									\
 95	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
 96									\
 97	if (efi_scratch.use_pgd) {					\
 98		write_cr3(efi_scratch.prev_cr3);			\
 99		__flush_tlb_all();					\
100	}								\
101									\
102	__kernel_fpu_end();						\
103	preempt_enable();						\
104	__s;								\
105})
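A brief editorial note on the CR3 switch above, plus a rough usage sketch (not from the file):

/*
 * When efi_scratch.use_pgd is set, the macro switches CR3 to the dedicated
 * EFI page tables (efi_scratch.efi_pgt) for the duration of the firmware
 * call, restores the previous CR3 afterwards, and flushes the TLB on both
 * transitions.  From the caller's side it still looks like an ordinary
 * wrapped call, e.g. roughly
 *
 *   efi_status_t status = efi_call_virt(query_variable_info, attr,
 *					  &storage, &remaining, &max);
 */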
106
107/*
108 * All X86_64 virt calls return non-void values. Thus, use non-void call for
109 * virt calls that would be void on X86_32.
110 */
111#define __efi_call_virt(f, args...) efi_call_virt(f, args)
112
113extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
114					u32 type, u64 attribute);
115
116#ifdef CONFIG_KASAN
117/*
118 * CONFIG_KASAN may redefine memset to __memset.  The __memset function is present
119 * only in the kernel binary.  Since the EFI stub is linked into a separate binary,
120 * it doesn't have __memset().  So we should use the standard memset from
121 * arch/x86/boot/compressed/string.c.  The same applies to memcpy and memmove.
122 */
123#undef memcpy
124#undef memset
125#undef memmove
126#endif
127
128#endif /* CONFIG_X86_32 */
129
130extern struct efi_scratch efi_scratch;
131extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
132extern int __init efi_memblock_x86_reserve_range(void);
133extern pgd_t * __init efi_call_phys_prolog(void);
134extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
135extern void __init efi_print_memmap(void);
136extern void __init efi_unmap_memmap(void);
137extern void __init efi_memory_uc(u64 addr, unsigned long size);
138extern void __init efi_map_region(efi_memory_desc_t *md);
139extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
140extern void efi_sync_low_kernel_mappings(void);
141extern int __init efi_alloc_page_tables(void);
142extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
143extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
144extern void __init old_map_region(efi_memory_desc_t *md);
145extern void __init runtime_code_page_mkexec(void);
146extern void __init efi_runtime_update_mappings(void);
147extern void __init efi_dump_pagetable(void);
148extern void __init efi_apply_memmap_quirks(void);
149extern int __init efi_reuse_config(u64 tables, int nr_tables);
150extern void efi_delete_dummy_variable(void);
151
152struct efi_setup_data {
153	u64 fw_vendor;
154	u64 runtime;
155	u64 tables;
156	u64 smbios;
157	u64 reserved[8];
158};
159
160extern u64 efi_setup;
161
162#ifdef CONFIG_EFI
163
164static inline bool efi_is_native(void)
165{
166	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
167}
168
169static inline bool efi_runtime_supported(void)
170{
171	if (efi_is_native())
172		return true;
173
174	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
175		return true;
176
177	return false;
178}
179
180extern struct console early_efi_console;
181extern void parse_efi_setup(u64 phys_addr, u32 data_len);
182
183#ifdef CONFIG_EFI_MIXED
184extern void efi_thunk_runtime_setup(void);
185extern efi_status_t efi_thunk_set_virtual_address_map(
186	void *phys_set_virtual_address_map,
187	unsigned long memory_map_size,
188	unsigned long descriptor_size,
189	u32 descriptor_version,
190	efi_memory_desc_t *virtual_map);
191#else
192static inline void efi_thunk_runtime_setup(void) {}
193static inline efi_status_t efi_thunk_set_virtual_address_map(
194	void *phys_set_virtual_address_map,
195	unsigned long memory_map_size,
196	unsigned long descriptor_size,
197	u32 descriptor_version,
198	efi_memory_desc_t *virtual_map)
199{
200	return EFI_SUCCESS;
201}
202#endif /* CONFIG_EFI_MIXED */
203
204
205/* arch specific definitions used by the stub code */
206
207struct efi_config {
208	u64 image_handle;
209	u64 table;
210	u64 allocate_pool;
211	u64 allocate_pages;
212	u64 get_memory_map;
213	u64 free_pool;
214	u64 free_pages;
215	u64 locate_handle;
216	u64 handle_protocol;
217	u64 exit_boot_services;
218	u64 text_output;
219	efi_status_t (*call)(unsigned long, ...);
220	bool is64;
221} __packed;
222
223__pure const struct efi_config *__efi_early(void);
224
225#define efi_call_early(f, ...)						\
226	__efi_early()->call(__efi_early()->f, __VA_ARGS__);
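An editorial usage sketch for the early-boot wrapper (approximate): the stub calls boot services before ExitBootServices() through this macro, which dispatches via the function table filled in by the startup code, e.g.

/*
 *   status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 *			     size, (void **)&buf);
 *   ...
 *   efi_call_early(free_pool, buf);
 */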
227
228extern bool efi_reboot_required(void);
229
230#else
231static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
232static inline bool efi_reboot_required(void)
233{
234	return false;
235}
236#endif /* CONFIG_EFI */
237
238#endif /* _ASM_X86_EFI_H */