// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

struct accept_range {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

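/*
 * Ranges that are currently being accepted. Used to serialize concurrent
 * accept_memory() callers that touch the same unit_size blocks of the bitmap.
 */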
static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above what the bitmap can address;
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
	 *    checks up to end+unit_size if 'end' is aligned on a unit_size
	 *    boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
	 *    'end' is aligned on a unit_size boundary. (immediately following
	 *    this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;
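	/*
	 * Illustrative numbers only: with a unit_size of, say, 2MB, a request
	 * ending exactly at a 2MB boundary is widened above by one extra unit,
	 * so a load_unaligned_zeropad() that straddles that boundary can never
	 * touch unaccepted memory. The real unit_size is whatever the
	 * firmware-provided unaccepted memory table reports.
	 */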

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

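	/*
	 * From here on the range is tracked as bit indices into the bitmap:
	 * each bit covers one unit_size block of physical memory. Round the
	 * start down and the end up so every partially covered unit is
	 * included.
	 */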
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody else is working on accepting the same range of
	 * memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap at the physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else is accepting the range, or at least part of
		 * it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);
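	/*
	 * Note: 'range' lives on this function's stack. That is safe because
	 * it is removed from accepting_list below, before the function
	 * returns.
	 */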

	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler would then spin
		 * continuously with interrupts disabled, preventing other
		 * tasks from making progress with the acceptance process.
		 */
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	list_del(&range.list);

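	/*
	 * Accepting a large range can take a long time; poke the soft lockup
	 * detector so it does not report a false positive for this task.
	 */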
	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
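
/*
 * Illustrative usage sketch (not a caller that exists in this file): code
 * about to touch a physical range for the first time would do something like
 *
 *	if (range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE))
 *		accept_memory(paddr, paddr + PAGE_SIZE);
 *
 * where 'paddr' is a hypothetical physical address of the page in question.
 */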

bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

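	/*
	 * Scan the covered units one by one; any set bit means part of the
	 * range has not been accepted yet.
	 */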
	spin_lock_irqsave(&unaccepted_memory_lock, flags);
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}

#ifdef CONFIG_PROC_VMCORE
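/*
 * Unaccepted memory must not be touched when the old kernel's memory is
 * dumped through /proc/vmcore: in a confidential guest such an access is not
 * recoverable. Report unaccepted pfns as "not RAM" so they are skipped.
 */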
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
						unsigned long pfn)
{
	return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
	.pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
	register_vmcore_cb(&vmcore_cb);
	return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */