// SPDX-License-Identifier: GPL-2.0-only
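/*
 * Handling of unaccepted memory as described by the EFI unaccepted memory
 * table: in confidential-computing guests (e.g. TDX), memory must be
 * explicitly "accepted" before first use. A firmware-provided bitmap tracks
 * which unit_size chunks are still unaccepted.
 */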

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

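/*
 * An in-flight acceptance request. Entries live on accepting_list while a
 * task accepts the range; 'start' and 'end' are bitmap bit numbers (one bit
 * per unit_size chunk), not physical addresses.
 */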
struct accept_range {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

static LIST_HEAD(accepting_list);

21/*
22 * accept_memory() -- Consult bitmap and accept the memory if needed.
23 *
24 * Only memory that is explicitly marked as unaccepted in the bitmap requires
25 * an action. All the remaining memory is implicitly accepted and doesn't need
26 * acceptance.
27 *
28 * No need to accept:
29 * - anything if the system has no unaccepted table;
30 * - memory that is below phys_base;
31 * - memory that is above the memory that addressable by the bitmap;
32 */
void accept_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	phys_addr_t end = start + size;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

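	/* Each bit in the bitmap covers unit_size bytes of physical memory. */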
	unit_size = unaccepted->unit_size;

	/*
	 * Only care about the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But they
	 * might also hit totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
	 *    checks up to the next unit_size if 'start+size' is aligned on a
	 *    unit_size boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, size) to the next unit_size
	 *    if 'start+size' is aligned on a unit_size boundary. (immediately
	 *    following this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

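	/* Convert byte offsets into bitmap bit numbers (one bit per unit_size). */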
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody else is working on accepting the same range of
	 * memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap at the physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else is accepting the range, or at least part of
		 * it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);

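	/* Walk every contiguous run of set (still unaccepted) bits in the range. */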
	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance of the same memory. The handler would then spin
		 * with interrupts disabled, preventing other tasks from
		 * making progress with the acceptance process.
		 */
		spin_unlock(&unaccepted_memory_lock);

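		/* Arch-specific acceptance; potentially slow, done with the lock dropped. */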
		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	list_del(&range.list);

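	/* Accepting a large range can take a while; keep the soft-lockup detector quiet. */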
	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	phys_addr_t end = start + size;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care about the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
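	/* Test one bit per unit_size chunk; any set bit means unaccepted memory. */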
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}
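
/*
 * Example (hypothetical caller, for illustration only): code about to touch
 * a freshly exposed physical range could pair the two helpers above like so:
 *
 *	if (range_contains_unaccepted_memory(phys, size))
 *		accept_memory(phys, size);
 *
 * accept_memory() consults the bitmap itself, so the check is merely an
 * optimization that skips the acceptance path for already-accepted memory.
 * The real callers live elsewhere in the kernel (e.g. the page allocator).
 */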

#ifdef CONFIG_PROC_VMCORE
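/*
 * kdump: /proc/vmcore must not read unaccepted pages from the crashed
 * kernel's memory. As the comment in accept_memory() explains, such a read
 * cannot be recovered and would kill the guest, so report these PFNs as
 * not-RAM and let them be skipped.
 */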
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
						unsigned long pfn)
{
	return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
	.pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
	register_vmcore_cb(&vmcore_cb);
	return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */