// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

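/*
 * A range that is currently being accepted, expressed in unit_size units
 * (i.e. bitmap bit indices rather than physical addresses).
 */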
struct accept_range {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory above the range addressable by the bitmap.
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless, but they
	 * might reach totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * This approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to the fix that together comprehensively avoid
	 * access to unaccepted memory. They ensure that an extra "guard" page
	 * is accepted in addition to the memory that needs to be used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
	 *    checks up to end+unit_size if 'end' is aligned on a unit_size
	 *    boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
	 *    'end' is aligned on a unit_size boundary. (immediately following
	 *    this comment)
	 */
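	/*
	 * Illustrative example (assuming a 2MB unit_size): a request to
	 * accept [0, 4MB) that ends exactly on a unit boundary is widened
	 * to [0, 6MB) so that a stray load just past 'end' cannot touch
	 * unaccepted memory.
	 */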
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

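	/* Convert byte offsets into unit_size-granular bitmap bit indices */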
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody else is already accepting the same range of memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap at the physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else is accepting the range, or at least part of it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);

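	/* Walk every contiguous run of set (still unaccepted) bits in the range */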
	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler will continuously
		 * spin with interrupts disabled, preventing the interrupted task
		 * from making progress with the acceptance process.
		 */
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	list_del(&range.list);

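	/*
	 * Accepting a large range can take a while with interrupts disabled;
	 * reset the soft-lockup detector so it does not fire spuriously.
	 */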
	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

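	/* Test one bit per unit_size chunk; any set bit means unaccepted memory */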
	spin_lock_irqsave(&unaccepted_memory_lock, flags);
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}

#ifdef CONFIG_PROC_VMCORE
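/*
 * Unaccepted pages must not be touched when reading a crash dump: report
 * them as not-RAM so /proc/vmcore skips them instead of faulting on them.
 */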
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
						unsigned long pfn)
{
	return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
	.pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
	register_vmcore_cb(&vmcore_cb);
	return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */