virt/kvm/async_pf.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct kvm_vcpu *vcpu = apf->vcpu;
	struct mm_struct *mm = vcpu->kvm->mm;
	unsigned long addr = apf->addr;
	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
	int locked = 1;
	bool first;

	might_sleep();

	/*
	 * Attempt to pin the VM's host address space, and simply skip gup() if
	 * acquiring a pin fails, i.e. if the process is exiting.  Note, KVM
	 * holds a reference to its associated mm_struct until the very end of
	 * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
	 * work item is fully processed.
	 */
	if (mmget_not_zero(mm)) {
		mmap_read_lock(mm);
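		/*
		 * The return value of gup is deliberately ignored: if faulting
		 * the page in fails, the vCPU simply retries the fault.  gup
		 * may drop mmap_lock (and clear @locked), so only unlock it
		 * below if it is still held.
		 */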
		get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
		if (locked)
			mmap_read_unlock(mm);
		mmput(mm);
	}

	/*
	 * Notify and kick the vCPU even if faulting in the page failed, e.g.
	 * so that the vCPU can retry the fault synchronously.
	 */
	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
		kvm_arch_async_page_present(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * The apf struct may be freed by kvm_check_async_pf_completion() as
	 * soon as the lock is dropped.  Nullify it to prevent improper usage.
	 */
	apf = NULL;

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	trace_kvm_async_pf_completed(addr, cr2_or_gpa);

	__kvm_vcpu_wake_up(vcpu);
}

static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
{
	/*
	 * The async #PF is "done", but KVM must wait for the work item itself,
	 * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
	 * KVM must ensure *no* code owned by KVM (the module) can be run
	 * after the last call to module_put().  Note, flushing the work item
	 * is always required when the item is taken off the completion queue.
	 * E.g. even if the vCPU handles the item in the "normal" path, the VM
	 * could be terminated before async_pf_execute() completes.
	 *
	 * Wake-all events skip the queue and go straight to the done list,
	 * i.e. they don't need to be flushed (but sanity check that the work
	 * wasn't queued).
	 */
	if (work->wakeup_all)
		WARN_ON_ONCE(work->work.func);
	else
		flush_work(&work->work);
	kmem_cache_free(async_pf_cache, work);
}

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
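		/*
		 * cancel_work_sync() returns true only if the work item was
		 * still pending, i.e. async_pf_execute() never ran.  In that
		 * case the item was never moved to the done list, so it must
		 * be freed here; otherwise it is freed via the done list
		 * below.
		 */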
		if (cancel_work_sync(&work->work))
			kmem_cache_free(async_pf_cache, work);
#endif
	}

	spin_lock(&vcpu->async_pf.lock);
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);

		spin_unlock(&vcpu->async_pf.lock);
		kvm_flush_and_free_async_pf_work(work);
		spin_lock(&vcpu->async_pf.lock);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
			kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kvm_flush_and_free_async_pf_work(work);
	}
}

/*
 * Try to schedule a job to handle the page fault asynchronously. Returns
 * 'true' on success, 'false' on failure (the page fault has to be handled
 * synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return false;

	/* Arch specific code should not do async PF in this case */
	if (unlikely(kvm_is_error_hva(hva)))
		return false;

	/*
	 * Do the allocation with GFP_NOWAIT: if we would have to sleep for the
	 * allocation anyway, we may as well sleep faulting the page in
	 * synchronously instead.
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return false;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->cr2_or_gpa = cr2_or_gpa;
	work->addr = hva;
	work->arch = *arch;

	INIT_WORK(&work->work, async_pf_execute);

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

	schedule_work(&work->work);

	return true;
}

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;
	bool first;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	vcpu->async_pf.queued++;
	return 0;
}
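
The entry points above are driven entirely from arch code: kvm_setup_async_pf() is called when a guest fault hits a host page that is not yet resident, kvm_check_async_pf_completion() is called from the vCPU run loop to deliver completed faults, and kvm_clear_async_pf_completion_queue() is called when a vCPU is destroyed or reset. The sketch below is a minimal illustration of that flow, not code from the kernel tree: the *_example() helpers and their parameters are hypothetical, only the kvm_* calls are the real API from the file above.

/*
 * Illustrative sketch only: roughly how arch code might drive the async #PF
 * API above.  The *_example() helpers are hypothetical.
 */
static int handle_guest_fault_example(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				      unsigned long hva)
{
	/* Arch-specific token describing the fault; contents are arch-defined. */
	struct kvm_arch_async_pf arch = {};

	/* Queue the fault for asynchronous handling if possible. */
	if (kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch))
		return 0;	/* the fault will complete asynchronously */

	/* Otherwise the fault has to be resolved synchronously. */
	return -EFAULT;
}

static void vcpu_run_iteration_example(struct kvm_vcpu *vcpu)
{
	/* Deliver any completed async faults before (re)entering the guest. */
	kvm_check_async_pf_completion(vcpu);

	/* ... enter the guest and handle the next exit ... */
}

static void vcpu_teardown_example(struct kvm_vcpu *vcpu)
{
	/* Cancel queued work and drain completed items on vCPU destruction. */
	kvm_clear_async_pf_completion_queue(vcpu);
}
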
virt/kvm/async_pf.c (v5.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;
	int locked = 1;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * access remotely.
	 */
	down_read(&mm->mmap_sem);
	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
			&locked);
	if (locked)
		up_read(&mm->mmap_sem);

	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	if (swq_has_sleeper(&vcpu->wq))
		swake_up_one(&vcpu->wq);

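	/*
	 * Drop the mm and kvm references that kvm_setup_async_pf() took when
	 * it queued this work item.
	 */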
	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * We know it's present in vcpu->async_pf.done, do
		 * nothing here.
		 */
		if (!work->vcpu)
			continue;

		spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_async_page_present_async(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}

int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * Do the allocation with GFP_NOWAIT: if we would have to sleep for the
	 * allocation anyway, we may as well sleep faulting the page in
	 * synchronously instead.
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);
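	/*
	 * The mm and kvm references taken above are dropped either by
	 * async_pf_execute() when the work completes, by
	 * kvm_clear_async_pf_completion_queue() if the work is cancelled,
	 * or on the retry_sync error path below.
	 */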

	/* this can't really happen otherwise gfn_to_pfn_async
	   would succeed */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}