// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

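/*
 * Typical cache lifecycle, as an illustrative sketch only; the real call
 * sites live in architecture code (for example the Xen shared_info and
 * vcpu_info handling) and may differ in detail.  'gpc', 'gpa', 'len', 'flags'
 * and 'err' below are placeholders, not symbols defined in this file:
 *
 *	kvm_gpc_init(gpc, kvm);
 *	kvm_gpc_activate(gpc, gpa, len);       (or kvm_gpc_activate_hva())
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, len)) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *
 *		if (kvm_gpc_refresh(gpc, len))
 *			goto err;              (mapping could not be established)
 *
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *
 *	(access the guest page through gpc->khva while holding the read lock)
 *
 *	read_unlock_irqrestore(&gpc->lock, flags);
 *	...
 *	kvm_gpc_deactivate(gpc);
 */
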
/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end)
{
	struct gfn_to_pfn_cache *gpc;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		read_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			read_unlock_irq(&gpc->lock);

			/*
			 * There is a small window here where the cache could
			 * be modified, and invalidation would no longer be
			 * necessary. Hence check again whether invalidation
			 * is still necessary once the write lock has been
			 * acquired.
			 */

			write_lock_irq(&gpc->lock);
			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
			    gpc->uhva >= start && gpc->uhva < end)
				gpc->valid = false;
			write_unlock_irq(&gpc->lock);
			continue;
		}

		read_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
}

static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
				 unsigned long len)
{
	unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
						       offset_in_page(gpa);

	/*
	 * The cached access must fit within a single page. The 'len' argument
	 * to activate() and refresh() exists only to enforce that.
	 */
	return offset + len <= PAGE_SIZE;
}
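
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096): a GPA-based
 * cache at gpa = 0x1ffc with len = 8 has offset_in_page(gpa) = 0xffc, and
 * 0xffc + 8 > PAGE_SIZE, so the access would straddle two pages and is
 * rejected; the same access at gpa = 0x1ff8 fits within one page and is
 * allowed.
 */
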
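/*
 * Check whether the cache currently holds a valid, usable mapping.  Callers
 * are expected to hold gpc->lock, at least for read, both while calling this
 * and for as long as they dereference gpc->khva or gpc->pfn; if the check
 * fails, the mapping can only be re-established via kvm_gpc_refresh().
 */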
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	/*
	 * If the page was cached from a memslot, make sure the memslots have
	 * not been re-configured.
	 */
	if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
		return false;

	if (kvm_is_error_hva(gpc->uhva))
		return false;

	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}

static void *gpc_map(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));

#ifdef CONFIG_HAS_IOMEM
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
	return NULL;
#endif
}

static void gpc_unmap(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (is_error_noslot_pfn(pfn) || !khva)
		return;

	if (pfn_valid(pfn)) {
		kunmap(pfn_to_page(pfn));
		return;
	}

#ifdef CONFIG_HAS_IOMEM
	memunmap(khva);
#endif
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock.  It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq.  This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}

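/*
 * Re-resolve gpc->uhva to a pfn (and kernel mapping), retrying until the
 * result is not invalidated by a concurrent mmu_notifier event.  Called and
 * returns with gpc->lock held for write, but drops and re-acquires it
 * internally and may sleep, so the caller must not rely on state that can
 * change while the lock is dropped.  Returns 0 on success, -EFAULT otherwise.
 */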
static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different from the old! */
	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;
	struct page *page;

	struct kvm_follow_pfn kfp = {
		.slot = gpc->memslot,
		.gfn = gpa_to_gfn(gpc->gpa),
		.flags = FOLL_WRITE,
		.hva = gpc->uhva,
		.refcounted_page = &page,
	};

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
	 * assets have already been updated, so a concurrent check() from a
	 * different task might otherwise not fail the gpa/uhva/generation
	 * checks even though the pfn and khva still refer to the old mapping.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt.  Unmapping might sleep, so this
		 * needs to be done after dropping the lock.  Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap(new_pfn, new_khva);

			kvm_release_page_unused(page);

			cond_resched();
		}

		new_pfn = hva_to_pfn(&kfp);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn.  Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (new_pfn == gpc->pfn)
			new_khva = old_khva;
		else
			new_khva = gpc_map(new_pfn);

		if (!new_khva) {
			kvm_release_page_unused(page);
			goto out_error;
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + offset_in_page(gpc->uhva);

	/*
	 * Put the reference to the _new_ page.  The page is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_page_clean(page);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}

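/*
 * Common refresh path for both GPA-based and HVA-based caches.  Exactly one
 * of @gpa and @uhva must be valid, and the caller must already hold
 * gpc->refresh_lock; gpc->lock is taken and released internally.
 */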
static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
	unsigned long page_offset;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	bool hva_change = false;
	void *old_khva;
	int ret;

	/* Either gpa or uhva must be valid, but not both */
	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
		return -EINVAL;

	lockdep_assert_held(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

	if (kvm_is_error_gpa(gpa)) {
		page_offset = offset_in_page(uhva);

		gpc->gpa = INVALID_GPA;
		gpc->memslot = NULL;
		gpc->uhva = PAGE_ALIGN_DOWN(uhva);

		if (gpc->uhva != old_uhva)
			hva_change = true;
	} else {
		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

		page_offset = offset_in_page(gpa);

		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
		    kvm_is_error_hva(gpc->uhva)) {
			gfn_t gfn = gpa_to_gfn(gpa);

			gpc->gpa = gpa;
			gpc->generation = slots->generation;
			gpc->memslot = __gfn_to_memslot(slots, gfn);
			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

			if (kvm_is_error_hva(gpc->uhva)) {
				ret = -EFAULT;
				goto out;
			}

			/*
			 * Even if the GPA and/or the memslot generation changed, the
			 * HVA may still be the same.
			 */
			if (gpc->uhva != old_uhva)
				hva_change = true;
		} else {
			gpc->uhva = old_uhva;
		}
	}

	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
	gpc->uhva += page_offset;

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || hva_change) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/*
		 * If the HVA→PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

 out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid; leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	if (unmap_old)
		gpc_unmap(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	unsigned long uhva;

	guard(mutex)(&gpc->refresh_lock);

	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
		return -EINVAL;

	/*
	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
	 * or HVA-based, not both.  For GPA-based caches, the HVA will be
	 * recomputed during refresh if necessary.
	 */
	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;

	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->gpa = INVALID_GPA;
	gpc->uhva = KVM_HVA_ERR_BAD;
	gpc->active = gpc->valid = false;
}

static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
			      unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	if (!kvm_gpc_is_valid_len(gpa, uhva, len))
		return -EINVAL;

	guard(mutex)(&gpc->refresh_lock);

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list; a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, uhva);
}

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	/*
	 * Explicitly disallow INVALID_GPA so that the magic value can be used
	 * by KVM to differentiate between GPA-based and HVA-based caches.
	 */
	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
		return -EINVAL;

	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}

int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
{
	if (!access_ok((void __user *)uhva, len))
		return -EINVAL;

	return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	guard(mutex)(&gpc->refresh_lock);

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list; KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA cache intact; it's protected by the
		 * memslot generation.  The PFN lookup needs to be redone every
		 * time as mmu_notifier protection is lost when the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap(old_pfn, old_khva);
	}
}