v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * EFI capsule support.
  4 *
  5 * Copyright 2013 Intel Corporation; author Matt Fleming
  6 */
  7
  8#define pr_fmt(fmt) "efi: " fmt
  9
 10#include <linux/slab.h>
 11#include <linux/mutex.h>
 12#include <linux/highmem.h>
 13#include <linux/efi.h>
 14#include <linux/vmalloc.h>
 15#include <asm/efi.h>
 16#include <asm/io.h>
 17
 18typedef struct {
 19	u64 length;
 20	u64 data;
 21} efi_capsule_block_desc_t;
 22
 23static bool capsule_pending;
 24static bool stop_capsules;
 25static int efi_reset_type = -1;
 26
 27/*
 28 * capsule_mutex serialises access to capsule_pending, efi_reset_type
 29 * and stop_capsules.
 30 */
 31static DEFINE_MUTEX(capsule_mutex);
 32
 33/**
 34 * efi_capsule_pending - has a capsule been passed to the firmware?
 35 * @reset_type: store the type of EFI reset if capsule is pending
 36 *
 37 * To ensure that the registered capsule is processed correctly by the
 38 * firmware we need to perform a specific type of reset. If a capsule is
 39 * pending return the reset type in @reset_type.
 40 *
 41 * This function will race with callers of efi_capsule_update(), for
 42 * example, calling this function while somebody else is in
 43 * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
 44 * will miss the updates to capsule_pending and efi_reset_type after
 45 * efi_capsule_update_locked() completes.
 46 *
 47 * A non-racy use is from platform reboot code because we use
 48 * system_state to ensure no capsules can be sent to the firmware once
 49 * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
 50 */
 51bool efi_capsule_pending(int *reset_type)
 52{
 53	if (!capsule_pending)
 54		return false;
 55
 56	if (reset_type)
 57		*reset_type = efi_reset_type;
 58
 59	return true;
 60}
 61
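/*
 * Example (editor's sketch, not part of this file): how platform reboot
 * code might consult efi_capsule_pending() to honour the reset type the
 * firmware expects. The function name sample_restart_prepare() is
 * hypothetical; the EFI_RESET_* values come from <linux/efi.h>.
 */
static void sample_restart_prepare(void)
{
	int reset_type;

	if (!efi_capsule_pending(&reset_type))
		return;

	if (reset_type == EFI_RESET_WARM)
		pr_info("capsule pending: warm reset required\n");
	else
		pr_info("capsule pending: cold reset required\n");
}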
 62/*
 63 * Whitelist of EFI capsule flags that we support.
 64 *
 65 * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
 66 * require us to prepare the kernel for reboot. Refuse to load any
 67 * capsules with that flag and any other flags that we do not know how
 68 * to handle.
 69 */
 70#define EFI_CAPSULE_SUPPORTED_FLAG_MASK			\
 71	(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
 72
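/*
 * Worked example (editor's note): a capsule header carrying
 * EFI_CAPSULE_INITIATE_RESET has a bit set outside this mask, so
 * "flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK" is non-zero and
 * efi_capsule_supported() below rejects it with -EINVAL before the
 * firmware is ever queried.
 */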
 73/**
 74 * efi_capsule_supported - does the firmware support the capsule?
 75 * @guid: vendor guid of capsule
 76 * @flags: capsule flags
 77 * @size: size of capsule data
 78 * @reset: the reset type required for this capsule
 79 *
 80 * Check whether a capsule with @flags is supported by the firmware
 81 * and that @size doesn't exceed the maximum size for a capsule.
 82 *
 83 * No attempt is made to check @reset against the reset type required
 84 * by any pending capsules because of the races involved.
 85 */
 86int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
 87{
 88	efi_capsule_header_t capsule;
 89	efi_capsule_header_t *cap_list[] = { &capsule };
 90	efi_status_t status;
 91	u64 max_size;
 92
 93	if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
 94		return -EINVAL;
 95
 96	capsule.headersize = capsule.imagesize = sizeof(capsule);
 97	memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
 98	capsule.flags = flags;
 99
100	status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
101	if (status != EFI_SUCCESS)
102		return efi_status_to_err(status);
103
104	if (size > max_size)
105		return -ENOSPC;
106
107	return 0;
108}
109EXPORT_SYMBOL_GPL(efi_capsule_supported);
110
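/*
 * Example (editor's sketch, not part of this file): a caller validating a
 * capsule before building the scatter gather list. sample_guid and
 * image_size are placeholders for values taken from a real capsule header.
 */
static int sample_check_capsule(efi_guid_t sample_guid, size_t image_size)
{
	int reset_type;
	int ret;

	ret = efi_capsule_supported(sample_guid, EFI_CAPSULE_PERSIST_ACROSS_RESET,
				    image_size, &reset_type);
	if (ret) {
		pr_err("capsule rejected by firmware: %d\n", ret);
		return ret;
	}

	pr_info("capsule accepted; firmware requires reset type %d\n", reset_type);
	return 0;
}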
111/*
112 * Every scatter gather list (block descriptor) page must end with a
113 * continuation pointer. The last continuation pointer of the last
114 * page must be zero to mark the end of the chain.
115 */
116#define SGLIST_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
117
118/*
119 * How many scatter gather list (block descriptor) pages do we need
120 * to map @count pages?
121 */
122static inline unsigned int sg_pages_num(unsigned int count)
123{
124	return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
125}
126
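/*
 * Worked example (editor's note): with 4 KiB pages and 16-byte block
 * descriptors, SGLIST_PER_PAGE is 4096 / 16 - 1 = 255. A 1 MiB capsule
 * occupies 256 data pages, so sg_pages_num(256) = DIV_ROUND_UP(256, 255)
 * = 2 scatter gather list pages, chained by a continuation pointer.
 */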
127/**
128 * efi_capsule_update_locked - pass a single capsule to the firmware
129 * @capsule: capsule to send to the firmware
130 * @sg_pages: array of scatter gather (block descriptor) pages
131 * @reset: the reset type required for @capsule
132 *
133 * Since this function must be called under capsule_mutex, check
134 * whether efi_reset_type will conflict with @reset, and atomically
135 * set it and capsule_pending if a capsule was successfully sent to
136 * the firmware.
137 *
138 * We also check to see if the system is about to restart, and if so,
139 * abort. This avoids races between efi_capsule_update() and
140 * efi_capsule_pending().
141 */
142static int
143efi_capsule_update_locked(efi_capsule_header_t *capsule,
144			  struct page **sg_pages, int reset)
145{
146	efi_physical_addr_t sglist_phys;
147	efi_status_t status;
148
149	lockdep_assert_held(&capsule_mutex);
150
151	/*
152	 * If someone has already registered a capsule that requires a
153	 * different reset type, we're out of luck and must abort.
154	 */
155	if (efi_reset_type >= 0 && efi_reset_type != reset) {
156		pr_err("Conflicting capsule reset type %d (%d).\n",
157		       reset, efi_reset_type);
158		return -EINVAL;
159	}
160
161	/*
162	 * If the system is getting ready to restart it may have
163	 * called efi_capsule_pending() to make decisions (such as
164	 * whether to force an EFI reboot), and we're racing against
165	 * that call. Abort in that case.
166	 */
167	if (unlikely(stop_capsules)) {
168		pr_warn("Capsule update raced with reboot, aborting.\n");
169		return -EINVAL;
170	}
171
172	sglist_phys = page_to_phys(sg_pages[0]);
173
174	status = efi.update_capsule(&capsule, 1, sglist_phys);
175	if (status == EFI_SUCCESS) {
176		capsule_pending = true;
177		efi_reset_type = reset;
178	}
179
180	return efi_status_to_err(status);
181}
182
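/*
 * Example (editor's note): if a capsule requiring EFI_RESET_WARM has
 * already been queued (so efi_reset_type == EFI_RESET_WARM) and a second
 * caller submits one whose QueryCapsuleCapabilities() reset type is
 * EFI_RESET_COLD, the conflict check above returns -EINVAL rather than
 * leaving the firmware with two incompatible reset requirements.
 */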
183/**
184 * efi_capsule_update - send a capsule to the firmware
185 * @capsule: capsule to send to firmware
186 * @pages: an array of capsule data pages
187 *
188 * Build a scatter gather list with EFI capsule block descriptors to
189 * map the capsule described by @capsule with its data in @pages and
190 * send it to the firmware via the UpdateCapsule() runtime service.
191 *
192 * @capsule must be a virtual mapping of the complete capsule update in the
193 * kernel address space, as the capsule can be consumed immediately.
195 * An efi_capsule_header_t that describes the entire contents of the capsule
195 * must be at the start of the first data page.
196 *
197 * Even though this function will validate that the firmware supports
198 * the capsule guid, users will likely want to check that
199 * efi_capsule_supported() returns true before calling this function
200 * because it makes it easier to print helpful error messages.
201 *
202 * If the capsule is successfully submitted to the firmware, any
203 * subsequent calls to efi_capsule_pending() will return true. @pages
204 * must not be released or modified if this function returns
205 * successfully.
206 *
207 * Callers must be prepared for this function to fail, which can
208 * happen if we raced with system reboot or if there is already a
209 * pending capsule that has a reset type that conflicts with the one
210 * required by @capsule. Do NOT use efi_capsule_pending() to detect
211 * this conflict since that would be racy. Instead, submit the capsule
212 * to efi_capsule_update() and check the return value.
213 *
214 * Return 0 on success, a converted EFI status code on failure.
215 */
216int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
217{
218	u32 imagesize = capsule->imagesize;
219	efi_guid_t guid = capsule->guid;
220	unsigned int count, sg_count;
221	u32 flags = capsule->flags;
222	struct page **sg_pages;
223	int rv, reset_type;
224	int i, j;
225
226	rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
227	if (rv)
228		return rv;
229
230	count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
231	sg_count = sg_pages_num(count);
232
233	sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
234	if (!sg_pages)
235		return -ENOMEM;
236
237	for (i = 0; i < sg_count; i++) {
238		sg_pages[i] = alloc_page(GFP_KERNEL);
239		if (!sg_pages[i]) {
240			rv = -ENOMEM;
241			goto out;
242		}
243	}
244
245	for (i = 0; i < sg_count; i++) {
246		efi_capsule_block_desc_t *sglist;
247
248		sglist = kmap_atomic(sg_pages[i]);
249
250		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
251			u64 sz = min_t(u64, imagesize,
252				       PAGE_SIZE - (u64)*pages % PAGE_SIZE);
253
254			sglist[j].length = sz;
255			sglist[j].data = *pages++;
256
257			imagesize -= sz;
258			count--;
259		}
260
261		/* Continuation pointer */
262		sglist[j].length = 0;
263
264		if (i + 1 == sg_count)
265			sglist[j].data = 0;
266		else
267			sglist[j].data = page_to_phys(sg_pages[i + 1]);
268
269#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
270		/*
271		 * At runtime, the firmware has no way to find out where the
272		 * sglist elements are mapped, if they are mapped in the first
273		 * place. Therefore, on architectures that can only perform
274		 * cache maintenance by virtual address, the firmware is unable
275		 * to perform this maintenance, and so it is up to the OS to do
276		 * it instead.
277		 */
278		efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
279#endif
280		kunmap_atomic(sglist);
281	}
282
283	mutex_lock(&capsule_mutex);
284	rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
285	mutex_unlock(&capsule_mutex);
286
287out:
288	for (i = 0; rv && i < sg_count; i++) {
289		if (sg_pages[i])
290			__free_page(sg_pages[i]);
291	}
292
293	kfree(sg_pages);
294	return rv;
295}
296EXPORT_SYMBOL_GPL(efi_capsule_update);
297
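/*
 * Example (editor's sketch, not part of this file): feeding a capsule image
 * to efi_capsule_update(). It assumes the image has already been copied into
 * the pages behind cap_pages[]; all names here are illustrative. A real user
 * (such as the capsule loader) must also keep the data pages alive if the
 * submission succeeds, since the firmware consumes them across the reset.
 */
static int sample_send_capsule(struct page **cap_pages, unsigned int nr_pages)
{
	efi_capsule_header_t *header;
	phys_addr_t *phys;
	unsigned int i;
	int ret;

	phys = kcalloc(nr_pages, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	for (i = 0; i < nr_pages; i++)
		phys[i] = page_to_phys(cap_pages[i]);

	/* Map the image so the header and body are virtually contiguous. */
	header = vmap(cap_pages, nr_pages, 0, PAGE_KERNEL);
	if (!header) {
		kfree(phys);
		return -ENOMEM;
	}

	ret = efi_capsule_update(header, phys);
	if (ret) {
		vunmap(header);
		kfree(phys);
		return ret;
	}

	/*
	 * Success: per the kernel-doc above, the capsule data pages must not
	 * be released or modified now; a real caller would keep cap_pages
	 * (and typically header and phys too) around until the reboot happens.
	 */
	return 0;
}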
298static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
299{
300	mutex_lock(&capsule_mutex);
301	stop_capsules = true;
302	mutex_unlock(&capsule_mutex);
303
304	return NOTIFY_DONE;
305}
306
307static struct notifier_block capsule_reboot_nb = {
308	.notifier_call = capsule_reboot_notify,
309};
310
311static int __init capsule_reboot_register(void)
312{
313	return register_reboot_notifier(&capsule_reboot_nb);
314}
315core_initcall(capsule_reboot_register);
v4.17
 
  1/*
  2 * EFI capsule support.
  3 *
  4 * Copyright 2013 Intel Corporation; author Matt Fleming
  5 *
  6 * This file is part of the Linux kernel, and is made available under
  7 * the terms of the GNU General Public License version 2.
  8 */
  9
 10#define pr_fmt(fmt) "efi: " fmt
 11
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/highmem.h>
 15#include <linux/efi.h>
 16#include <linux/vmalloc.h>
 17#include <asm/io.h>
 18
 19typedef struct {
 20	u64 length;
 21	u64 data;
 22} efi_capsule_block_desc_t;
 23
 24static bool capsule_pending;
 25static bool stop_capsules;
 26static int efi_reset_type = -1;
 27
 28/*
 29 * capsule_mutex serialises access to capsule_pending, efi_reset_type
 30 * and stop_capsules.
 31 */
 32static DEFINE_MUTEX(capsule_mutex);
 33
 34/**
 35 * efi_capsule_pending - has a capsule been passed to the firmware?
 36 * @reset_type: store the type of EFI reset if capsule is pending
 37 *
 38 * To ensure that the registered capsule is processed correctly by the
 39 * firmware we need to perform a specific type of reset. If a capsule is
 40 * pending return the reset type in @reset_type.
 41 *
 42 * This function will race with callers of efi_capsule_update(), for
 43 * example, calling this function while somebody else is in
 44 * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
 45 * will miss the updates to capsule_pending and efi_reset_type after
 46 * efi_capsule_update_locked() completes.
 47 *
 48 * A non-racy use is from platform reboot code because we use
 49 * system_state to ensure no capsules can be sent to the firmware once
 50 * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
 51 */
 52bool efi_capsule_pending(int *reset_type)
 53{
 54	if (!capsule_pending)
 55		return false;
 56
 57	if (reset_type)
 58		*reset_type = efi_reset_type;
 59
 60	return true;
 61}
 62
 63/*
 64 * Whitelist of EFI capsule flags that we support.
 65 *
 66 * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
 67 * require us to prepare the kernel for reboot. Refuse to load any
 68 * capsules with that flag and any other flags that we do not know how
 69 * to handle.
 70 */
 71#define EFI_CAPSULE_SUPPORTED_FLAG_MASK			\
 72	(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
 73
 74/**
 75 * efi_capsule_supported - does the firmware support the capsule?
 76 * @guid: vendor guid of capsule
 77 * @flags: capsule flags
 78 * @size: size of capsule data
 79 * @reset: the reset type required for this capsule
 80 *
 81 * Check whether a capsule with @flags is supported by the firmware
 82 * and that @size doesn't exceed the maximum size for a capsule.
 83 *
 84 * No attempt is made to check @reset against the reset type required
 85 * by any pending capsules because of the races involved.
 86 */
 87int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
 88{
 89	efi_capsule_header_t capsule;
 90	efi_capsule_header_t *cap_list[] = { &capsule };
 91	efi_status_t status;
 92	u64 max_size;
 93
 94	if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
 95		return -EINVAL;
 96
 97	capsule.headersize = capsule.imagesize = sizeof(capsule);
 98	memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
 99	capsule.flags = flags;
100
101	status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
102	if (status != EFI_SUCCESS)
103		return efi_status_to_err(status);
104
105	if (size > max_size)
106		return -ENOSPC;
107
108	return 0;
109}
110EXPORT_SYMBOL_GPL(efi_capsule_supported);
111
112/*
113 * Every scatter gather list (block descriptor) page must end with a
114 * continuation pointer. The last continuation pointer of the last
115 * page must be zero to mark the end of the chain.
116 */
117#define SGLIST_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
118
119/*
120 * How many scatter gather list (block descriptor) pages do we need
121 * to map @count pages?
122 */
123static inline unsigned int sg_pages_num(unsigned int count)
124{
125	return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
126}
127
128/**
129 * efi_capsule_update_locked - pass a single capsule to the firmware
130 * @capsule: capsule to send to the firmware
131 * @sg_pages: array of scatter gather (block descriptor) pages
132 * @reset: the reset type required for @capsule
133 *
134 * Since this function must be called under capsule_mutex, check
135 * whether efi_reset_type will conflict with @reset, and atomically
136 * set it and capsule_pending if a capsule was successfully sent to
137 * the firmware.
138 *
139 * We also check to see if the system is about to restart, and if so,
140 * abort. This avoids races between efi_capsule_update() and
141 * efi_capsule_pending().
142 */
143static int
144efi_capsule_update_locked(efi_capsule_header_t *capsule,
145			  struct page **sg_pages, int reset)
146{
147	efi_physical_addr_t sglist_phys;
148	efi_status_t status;
149
150	lockdep_assert_held(&capsule_mutex);
151
152	/*
153	 * If someone has already registered a capsule that requires a
154	 * different reset type, we're out of luck and must abort.
155	 */
156	if (efi_reset_type >= 0 && efi_reset_type != reset) {
157		pr_err("Conflicting capsule reset type %d (%d).\n",
158		       reset, efi_reset_type);
159		return -EINVAL;
160	}
161
162	/*
163	 * If the system is getting ready to restart it may have
164	 * called efi_capsule_pending() to make decisions (such as
165	 * whether to force an EFI reboot), and we're racing against
166	 * that call. Abort in that case.
167	 */
168	if (unlikely(stop_capsules)) {
169		pr_warn("Capsule update raced with reboot, aborting.\n");
170		return -EINVAL;
171	}
172
173	sglist_phys = page_to_phys(sg_pages[0]);
174
175	status = efi.update_capsule(&capsule, 1, sglist_phys);
176	if (status == EFI_SUCCESS) {
177		capsule_pending = true;
178		efi_reset_type = reset;
179	}
180
181	return efi_status_to_err(status);
182}
183
184/**
185 * efi_capsule_update - send a capsule to the firmware
186 * @capsule: capsule to send to firmware
187 * @pages: an array of capsule data pages
188 *
189 * Build a scatter gather list with EFI capsule block descriptors to
190 * map the capsule described by @capsule with its data in @pages and
191 * send it to the firmware via the UpdateCapsule() runtime service.
192 *
193 * @capsule must be a virtual mapping of the complete capsule update in the
194 * kernel address space, as the capsule can be consumed immediately.
195 * An efi_capsule_header_t that describes the entire contents of the capsule
196 * must be at the start of the first data page.
197 *
198 * Even though this function will validate that the firmware supports
199 * the capsule guid, users will likely want to check that
200 * efi_capsule_supported() returns true before calling this function
201 * because it makes it easier to print helpful error messages.
202 *
203 * If the capsule is successfully submitted to the firmware, any
204 * subsequent calls to efi_capsule_pending() will return true. @pages
205 * must not be released or modified if this function returns
206 * successfully.
207 *
208 * Callers must be prepared for this function to fail, which can
209 * happen if we raced with system reboot or if there is already a
210 * pending capsule that has a reset type that conflicts with the one
211 * required by @capsule. Do NOT use efi_capsule_pending() to detect
212 * this conflict since that would be racy. Instead, submit the capsule
213 * to efi_capsule_update() and check the return value.
214 *
215 * Return 0 on success, a converted EFI status code on failure.
216 */
217int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
218{
219	u32 imagesize = capsule->imagesize;
220	efi_guid_t guid = capsule->guid;
221	unsigned int count, sg_count;
222	u32 flags = capsule->flags;
223	struct page **sg_pages;
224	int rv, reset_type;
225	int i, j;
226
227	rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
228	if (rv)
229		return rv;
230
231	count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
232	sg_count = sg_pages_num(count);
233
234	sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL);
235	if (!sg_pages)
236		return -ENOMEM;
237
238	for (i = 0; i < sg_count; i++) {
239		sg_pages[i] = alloc_page(GFP_KERNEL);
240		if (!sg_pages[i]) {
241			rv = -ENOMEM;
242			goto out;
243		}
244	}
245
246	for (i = 0; i < sg_count; i++) {
247		efi_capsule_block_desc_t *sglist;
248
249		sglist = kmap(sg_pages[i]);
250
251		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
252			u64 sz = min_t(u64, imagesize,
253				       PAGE_SIZE - (u64)*pages % PAGE_SIZE);
254
255			sglist[j].length = sz;
256			sglist[j].data = *pages++;
257
258			imagesize -= sz;
259			count--;
260		}
261
262		/* Continuation pointer */
263		sglist[j].length = 0;
264
265		if (i + 1 == sg_count)
266			sglist[j].data = 0;
267		else
268			sglist[j].data = page_to_phys(sg_pages[i + 1]);
269
270		kunmap(sg_pages[i]);
271	}
272
273	mutex_lock(&capsule_mutex);
274	rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
275	mutex_unlock(&capsule_mutex);
276
277out:
278	for (i = 0; rv && i < sg_count; i++) {
279		if (sg_pages[i])
280			__free_page(sg_pages[i]);
281	}
282
283	kfree(sg_pages);
284	return rv;
285}
286EXPORT_SYMBOL_GPL(efi_capsule_update);
287
288static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
289{
290	mutex_lock(&capsule_mutex);
291	stop_capsules = true;
292	mutex_unlock(&capsule_mutex);
293
294	return NOTIFY_DONE;
295}
296
297static struct notifier_block capsule_reboot_nb = {
298	.notifier_call = capsule_reboot_notify,
299};
300
301static int __init capsule_reboot_register(void)
302{
303	return register_reboot_notifier(&capsule_reboot_nb);
304}
305core_initcall(capsule_reboot_register);