// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

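/*
 * Rough lifecycle of a cache, as implemented below: kvm_gpc_init() sets up
 * the locks and usage mode; kvm_gpc_activate() maps an initial gpa and makes
 * the cache visible to mmu_notifier events; users validate the mapping with
 * kvm_gpc_check() under read_lock(&gpc->lock) and call kvm_gpc_refresh()
 * when the check fails; kvm_gpc_deactivate() unmaps and unlinks the cache.
 */
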
/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	struct gfn_to_pfn_cache *gpc;
	bool evict_vcpus = false;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;

			/*
			 * If a guest vCPU could be using the physical address,
			 * it needs to be forced out of guest mode.
			 */
			if (gpc->usage & KVM_GUEST_USES_PFN) {
				if (!evict_vcpus) {
					evict_vcpus = true;
					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
				}
				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
			}
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);

	if (evict_vcpus) {
		/*
		 * KVM needs to ensure the vCPU is fully out of guest context
		 * before allowing the invalidation to continue.
		 */
		unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
		bool called;

		/*
		 * If the OOM reaper is active, then all vCPUs should have
		 * been stopped already, so perform the request without
		 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
		 */
		if (!may_block)
			req &= ~KVM_REQUEST_WAIT;

		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

		WARN_ON_ONCE(called && !may_block);
	}
}
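
/*
 * Clearing gpc->valid in the hook above is what makes a subsequent
 * kvm_gpc_check() fail and push users into kvm_gpc_refresh(); the vCPU
 * kick is only needed for caches whose pfn may be in use by a vCPU
 * running in guest mode.
 */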

bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;

	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_gpc_check);

static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (!is_error_noslot_pfn(pfn) && khva) {
		if (pfn_valid(pfn))
			kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
		else
			memunmap(khva);
#endif
	}
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used because the invalidation of caches in the mmu_notifier
	 * event occurs _before_ mmu_invalidate_in_progress is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock.  It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq.  This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}
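
/*
 * Canonical use, as in hva_to_pfn_retry() below: snapshot
 * kvm->mmu_invalidate_seq (followed by smp_rmb()) before dropping gpc->lock,
 * resolve the hva to a pfn outside the lock, retake the lock, and retry if
 * this helper reports that an invalidation began or completed in the
 * meantime.
 */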

static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
	 * assets have already been updated, so a concurrent check() from a
	 * different task might not fail the gpa/uhva/generation checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt.  Unmapping might sleep, so this
		 * needs to be done after dropping the lock.  Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap_khva(new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);

			cond_resched();
		}

		/* We always request a writeable mapping */
		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn.  Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (gpc->usage & KVM_HOST_USES_PFN) {
			if (new_pfn == gpc->pfn) {
				new_khva = old_khva;
			} else if (pfn_valid(new_pfn)) {
				new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
			} else {
				new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
			}
			if (!new_khva) {
				kvm_release_pfn_clean(new_pfn);
				goto out_error;
			}
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

	/*
	 * Put the reference to the _new_ pfn.  The pfn is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}

static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
			     unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
	unsigned long page_offset = gpa & ~PAGE_MASK;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	void *old_khva;
	int ret;

	/*
	 * The request must fit within a single page.  The 'len' argument
	 * exists only to enforce that.
	 */
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	/*
	 * If another task is refreshing the cache, wait for it to complete.
	 * There is no guarantee that concurrent refreshes will see the same
	 * gpa, memslots generation, etc..., so they must be fully serialized.
	 */
	mutex_lock(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_uhva = gpc->uhva;

	/*
	 * Refresh the userspace HVA first if the GPA or the memslots
	 * generation changed, or if the cached HVA is invalid.
	 */
	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

		if (kvm_is_error_hva(gpc->uhva)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || old_uhva != gpc->uhva) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/*
		 * If the HVA→PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

 out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid, so leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	mutex_unlock(&gpc->refresh_lock);

	if (unmap_old)
		gpc_unmap_khva(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
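
/*
 * A minimal caller-side sketch, assuming a cache that backs a u64 shared
 * with the guest.  The function name and payload are illustrative, not part
 * of this file; the retry loop is the real contract: gpc->khva may only be
 * dereferenced while the read side of gpc->lock is held and kvm_gpc_check()
 * has succeeded under that lock.
 */
static int __maybe_unused example_gpc_write_u64(struct gfn_to_pfn_cache *gpc,
						u64 val)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(val))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-establish the mapping; fails if the uHVA became invalid. */
		if (kvm_gpc_refresh(gpc, sizeof(val)))
			return -EFAULT;

		read_lock_irqsave(&gpc->lock, flags);
	}

	*(u64 *)gpc->khva = val;
	read_unlock_irqrestore(&gpc->lock, flags);

	return 0;
}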

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
{
	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);

	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->vcpu = vcpu;
	gpc->usage = usage;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->uhva = KVM_HVA_ERR_BAD;
}
EXPORT_SYMBOL_GPL(kvm_gpc_init);

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list; a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
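
/*
 * A minimal setup sketch, assuming a host-only user; the function name is
 * illustrative.  Initialization happens once; activation maps the target
 * gpa and links the cache so the mmu_notifier hook above can see it.  A
 * vCPU pointer is only required for KVM_GUEST_USES_PFN caches.
 */
static int __maybe_unused example_gpc_setup(struct kvm *kvm,
					    struct gfn_to_pfn_cache *gpc,
					    gpa_t gpa)
{
	kvm_gpc_init(gpc, kvm, NULL, KVM_HOST_USES_PFN);

	/* Maps the page and links the cache into kvm->gpc_list. */
	return kvm_gpc_activate(gpc, gpa, sizeof(u64));
}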

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list; KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA cache intact; it's protected by the
		 * memslot generation.  The PFN lookup needs to be redone every
		 * time as mmu_notifier protection is lost when the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap_khva(old_pfn, old_khva);
	}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
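
/*
 * Teardown note: kvm_gpc_deactivate() must run before the structure that
 * embeds the cache is freed.  Until list_del() above removes the cache from
 * kvm->gpc_list, gfn_to_pfn_cache_invalidate_start() may still walk it and
 * take gpc->lock.
 */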