virt/kvm/async_pf.c (v6.9.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * kvm asynchronous fault support
  4 *
  5 * Copyright 2010 Red Hat, Inc.
  6 *
  7 * Author:
  8 *      Gleb Natapov <gleb@redhat.com>
  9 */
 10
 11#include <linux/kvm_host.h>
 12#include <linux/slab.h>
 13#include <linux/module.h>
 14#include <linux/mmu_context.h>
 15#include <linux/sched/mm.h>
 16
 17#include "async_pf.h"
 18#include <trace/events/kvm.h>
 19
 20static struct kmem_cache *async_pf_cache;
 21
 22int kvm_async_pf_init(void)
 23{
 24	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
 25
 26	if (!async_pf_cache)
 27		return -ENOMEM;
 28
 29	return 0;
 30}
 31
 32void kvm_async_pf_deinit(void)
 33{
 34	kmem_cache_destroy(async_pf_cache);
 35	async_pf_cache = NULL;
 36}
 37
 38void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 39{
 40	INIT_LIST_HEAD(&vcpu->async_pf.done);
 41	INIT_LIST_HEAD(&vcpu->async_pf.queue);
 42	spin_lock_init(&vcpu->async_pf.lock);
 43}
 44
 45static void async_pf_execute(struct work_struct *work)
 46{
 47	struct kvm_async_pf *apf =
  48		container_of(work, struct kvm_async_pf, work);
 49	struct kvm_vcpu *vcpu = apf->vcpu;
 50	struct mm_struct *mm = vcpu->kvm->mm;
 51	unsigned long addr = apf->addr;
 52	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 53	int locked = 1;
 54	bool first;
 55
 56	might_sleep();
 57
 58	/*
 59	 * Attempt to pin the VM's host address space, and simply skip gup() if
  60	 * acquiring a pin fails, i.e. if the process is exiting.  Note, KVM
 61	 * holds a reference to its associated mm_struct until the very end of
 62	 * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
 63	 * work item is fully processed.
 64	 */
 65	if (mmget_not_zero(mm)) {
 66		mmap_read_lock(mm);
 67		get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
 68		if (locked)
 69			mmap_read_unlock(mm);
 70		mmput(mm);
 71	}
 72
 73	/*
 74	 * Notify and kick the vCPU even if faulting in the page failed, e.g.
 75	 * so that the vCPU can retry the fault synchronously.
 76	 */
 77	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
 78		kvm_arch_async_page_present(vcpu, apf);
 79
 80	spin_lock(&vcpu->async_pf.lock);
 81	first = list_empty(&vcpu->async_pf.done);
 82	list_add_tail(&apf->link, &vcpu->async_pf.done);
 83	apf->vcpu = NULL;
 84	spin_unlock(&vcpu->async_pf.lock);
 85
 86	/*
 87	 * The apf struct may be freed by kvm_check_async_pf_completion() as
 88	 * soon as the lock is dropped.  Nullify it to prevent improper usage.
 89	 */
 90	apf = NULL;
 91
 92	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
 93		kvm_arch_async_page_present_queued(vcpu);
  94
  95	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 96
 97	__kvm_vcpu_wake_up(vcpu);
 98}
 99
100static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
101{
102	/*
103	 * The async #PF is "done", but KVM must wait for the work item itself,
104	 * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
 105	 * KVM must ensure *no* code owned by the KVM module can be run
106	 * after the last call to module_put().  Note, flushing the work item
107	 * is always required when the item is taken off the completion queue.
108	 * E.g. even if the vCPU handles the item in the "normal" path, the VM
109	 * could be terminated before async_pf_execute() completes.
110	 *
 111	 * Wake-all events skip the queue and go straight to done, i.e. they don't
112	 * need to be flushed (but sanity check that the work wasn't queued).
113	 */
114	if (work->wakeup_all)
115		WARN_ON_ONCE(work->work.func);
116	else
117		flush_work(&work->work);
118	kmem_cache_free(async_pf_cache, work);
119}
120
121void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
122{
123	spin_lock(&vcpu->async_pf.lock);
124
125	/* cancel outstanding work queue item */
126	while (!list_empty(&vcpu->async_pf.queue)) {
127		struct kvm_async_pf *work =
128			list_first_entry(&vcpu->async_pf.queue,
129					 typeof(*work), queue);
130		list_del(&work->queue);
131
132		/*
133		 * We know it's present in vcpu->async_pf.done, do
134		 * nothing here.
135		 */
136		if (!work->vcpu)
137			continue;
138
139		spin_unlock(&vcpu->async_pf.lock);
140#ifdef CONFIG_KVM_ASYNC_PF_SYNC
141		flush_work(&work->work);
142#else
 143		if (cancel_work_sync(&work->work))
 144			kmem_cache_free(async_pf_cache, work);
 145#endif
146		spin_lock(&vcpu->async_pf.lock);
147	}
148
149	while (!list_empty(&vcpu->async_pf.done)) {
150		struct kvm_async_pf *work =
151			list_first_entry(&vcpu->async_pf.done,
152					 typeof(*work), link);
153		list_del(&work->link);
154
155		spin_unlock(&vcpu->async_pf.lock);
156		kvm_flush_and_free_async_pf_work(work);
157		spin_lock(&vcpu->async_pf.lock);
158	}
159	spin_unlock(&vcpu->async_pf.lock);
160
161	vcpu->async_pf.queued = 0;
162}
163
164void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
165{
166	struct kvm_async_pf *work;
167
168	while (!list_empty_careful(&vcpu->async_pf.done) &&
169	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
170		spin_lock(&vcpu->async_pf.lock);
171		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
172					      link);
173		list_del(&work->link);
174		spin_unlock(&vcpu->async_pf.lock);
175
176		kvm_arch_async_page_ready(vcpu, work);
177		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
178			kvm_arch_async_page_present(vcpu, work);
179
180		list_del(&work->queue);
181		vcpu->async_pf.queued--;
182		kvm_flush_and_free_async_pf_work(work);
183	}
184}
185
186/*
187 * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
188 * success, 'false' on failure (page fault has to be handled synchronously).
189 */
190bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
191			unsigned long hva, struct kvm_arch_async_pf *arch)
192{
193	struct kvm_async_pf *work;
194
195	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
196		return false;
197
198	/* Arch specific code should not do async PF in this case */
199	if (unlikely(kvm_is_error_hva(hva)))
200		return false;
201
202	/*
 203	 * Do a nowait allocation: if we are going to sleep anyway, we
 204	 * may as well sleep faulting in the page.
205	 */
206	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
207	if (!work)
208		return false;
209
210	work->wakeup_all = false;
211	work->vcpu = vcpu;
212	work->cr2_or_gpa = cr2_or_gpa;
213	work->addr = hva;
 214	work->arch = *arch;
 215
216	INIT_WORK(&work->work, async_pf_execute);
217
218	list_add_tail(&work->queue, &vcpu->async_pf.queue);
219	vcpu->async_pf.queued++;
220	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
221
222	schedule_work(&work->work);
223
224	return true;
225}
226
227int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
228{
229	struct kvm_async_pf *work;
230	bool first;
231
232	if (!list_empty_careful(&vcpu->async_pf.done))
233		return 0;
234
235	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
236	if (!work)
237		return -ENOMEM;
238
239	work->wakeup_all = true;
240	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
241
242	spin_lock(&vcpu->async_pf.lock);
243	first = list_empty(&vcpu->async_pf.done);
244	list_add_tail(&work->link, &vcpu->async_pf.done);
245	spin_unlock(&vcpu->async_pf.lock);
246
247	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
248		kvm_arch_async_page_present_queued(vcpu);
249
250	vcpu->async_pf.queued++;
251	return 0;
252}
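
For orientation, below is a minimal sketch of how architecture code might drive the API above; it is not taken from any in-tree port. Only kvm_setup_async_pf(), kvm_check_async_pf_completion() and struct kvm_arch_async_pf come from the listing (and are only built with CONFIG_KVM_ASYNC_PF); the names my_arch_handle_stage2_fault(), my_arch_vcpu_run_loop() and my_arch_fault_in_sync() are hypothetical placeholders.

#include <linux/kvm_host.h>

/* Hypothetical synchronous slow path, provided elsewhere by the arch. */
int my_arch_fault_in_sync(struct kvm_vcpu *vcpu, unsigned long hva);

/*
 * Fault-handler sketch: try to queue the fault as an async #PF so the guest
 * can keep running, and fall back to faulting the page in synchronously if
 * the work item cannot be queued (queue full, bad hva, allocation failure).
 */
static int my_arch_handle_stage2_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       unsigned long hva)
{
	struct kvm_arch_async_pf arch = {};	/* arch-specific token for the fault */

	if (kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch))
		return 0;	/* queued; the guest can be resumed */

	return my_arch_fault_in_sync(vcpu, hva);
}

/*
 * Run-loop sketch: completed async faults are dequeued and delivered to the
 * guest before every guest entry.
 */
static void my_arch_vcpu_run_loop(struct kvm_vcpu *vcpu)
{
	for (;;) {
		kvm_check_async_pf_completion(vcpu);
		/* ... enter the guest and handle the next exit ... */
	}
}
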
virt/kvm/async_pf.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * kvm asynchronous fault support
  4 *
  5 * Copyright 2010 Red Hat, Inc.
  6 *
  7 * Author:
  8 *      Gleb Natapov <gleb@redhat.com>
  9 */
 10
 11#include <linux/kvm_host.h>
 12#include <linux/slab.h>
 13#include <linux/module.h>
 14#include <linux/mmu_context.h>
 15#include <linux/sched/mm.h>
 16
 17#include "async_pf.h"
 18#include <trace/events/kvm.h>
 19
 20static struct kmem_cache *async_pf_cache;
 21
 22int kvm_async_pf_init(void)
 23{
 24	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
 25
 26	if (!async_pf_cache)
 27		return -ENOMEM;
 28
 29	return 0;
 30}
 31
 32void kvm_async_pf_deinit(void)
 33{
 34	kmem_cache_destroy(async_pf_cache);
 35	async_pf_cache = NULL;
 36}
 37
 38void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 39{
 40	INIT_LIST_HEAD(&vcpu->async_pf.done);
 41	INIT_LIST_HEAD(&vcpu->async_pf.queue);
 42	spin_lock_init(&vcpu->async_pf.lock);
 43}
 44
 45static void async_pf_execute(struct work_struct *work)
 46{
 47	struct kvm_async_pf *apf =
 48		container_of(work, struct kvm_async_pf, work);
 49	struct mm_struct *mm = apf->mm;
  50	struct kvm_vcpu *vcpu = apf->vcpu;
 51	unsigned long addr = apf->addr;
 52	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 53	int locked = 1;
 54	bool first;
 55
 56	might_sleep();
 57
 58	/*
  59	 * This work is run asynchronously to the task which owns
  60	 * the mm and might be done in another context, so we must
  61	 * access it remotely.
  62	 */
 63	mmap_read_lock(mm);
 64	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
 65			&locked);
 66	if (locked)
  67		mmap_read_unlock(mm);
  68
  69	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
 70		kvm_arch_async_page_present(vcpu, apf);
 71
 72	spin_lock(&vcpu->async_pf.lock);
 73	first = list_empty(&vcpu->async_pf.done);
 74	list_add_tail(&apf->link, &vcpu->async_pf.done);
 75	apf->vcpu = NULL;
 76	spin_unlock(&vcpu->async_pf.lock);
  77
  78	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
 79		kvm_arch_async_page_present_queued(vcpu);
 80
 81	/*
 82	 * apf may be freed by kvm_check_async_pf_completion() after
 83	 * this point
 84	 */
 85
 86	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 87
  88	__kvm_vcpu_wake_up(vcpu);
  89
  90	mmput(mm);
  91	kvm_put_kvm(vcpu->kvm);
  92}
 93
 94void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 95{
 96	spin_lock(&vcpu->async_pf.lock);
 97
 98	/* cancel outstanding work queue item */
 99	while (!list_empty(&vcpu->async_pf.queue)) {
100		struct kvm_async_pf *work =
101			list_first_entry(&vcpu->async_pf.queue,
102					 typeof(*work), queue);
103		list_del(&work->queue);
104
105		/*
106		 * We know it's present in vcpu->async_pf.done, do
107		 * nothing here.
108		 */
109		if (!work->vcpu)
110			continue;
111
112		spin_unlock(&vcpu->async_pf.lock);
113#ifdef CONFIG_KVM_ASYNC_PF_SYNC
114		flush_work(&work->work);
115#else
116		if (cancel_work_sync(&work->work)) {
117			mmput(work->mm);
118			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
119			kmem_cache_free(async_pf_cache, work);
120		}
121#endif
122		spin_lock(&vcpu->async_pf.lock);
123	}
124
125	while (!list_empty(&vcpu->async_pf.done)) {
126		struct kvm_async_pf *work =
127			list_first_entry(&vcpu->async_pf.done,
128					 typeof(*work), link);
129		list_del(&work->link);
 130	kmem_cache_free(async_pf_cache, work);
 131	}
132	spin_unlock(&vcpu->async_pf.lock);
133
134	vcpu->async_pf.queued = 0;
135}
136
137void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
138{
139	struct kvm_async_pf *work;
140
141	while (!list_empty_careful(&vcpu->async_pf.done) &&
142	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
143		spin_lock(&vcpu->async_pf.lock);
144		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
145					      link);
146		list_del(&work->link);
147		spin_unlock(&vcpu->async_pf.lock);
148
149		kvm_arch_async_page_ready(vcpu, work);
150		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
151			kvm_arch_async_page_present(vcpu, work);
152
153		list_del(&work->queue);
154		vcpu->async_pf.queued--;
155		kmem_cache_free(async_pf_cache, work);
156	}
157}
158
159/*
160 * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
161 * success, 'false' on failure (page fault has to be handled synchronously).
162 */
163bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
164			unsigned long hva, struct kvm_arch_async_pf *arch)
165{
166	struct kvm_async_pf *work;
167
168	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
169		return false;
170
171	/* Arch specific code should not do async PF in this case */
172	if (unlikely(kvm_is_error_hva(hva)))
173		return false;
174
175	/*
 176	 * Do a nowait allocation: if we are going to sleep anyway, we
 177	 * may as well sleep faulting in the page.
178	 */
179	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
180	if (!work)
181		return false;
182
183	work->wakeup_all = false;
184	work->vcpu = vcpu;
185	work->cr2_or_gpa = cr2_or_gpa;
186	work->addr = hva;
187	work->arch = *arch;
188	work->mm = current->mm;
189	mmget(work->mm);
190	kvm_get_kvm(work->vcpu->kvm);
191
192	INIT_WORK(&work->work, async_pf_execute);
193
194	list_add_tail(&work->queue, &vcpu->async_pf.queue);
195	vcpu->async_pf.queued++;
196	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
197
198	schedule_work(&work->work);
199
200	return true;
201}
202
203int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
204{
205	struct kvm_async_pf *work;
206	bool first;
207
208	if (!list_empty_careful(&vcpu->async_pf.done))
209		return 0;
210
211	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
212	if (!work)
213		return -ENOMEM;
214
215	work->wakeup_all = true;
216	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
217
218	spin_lock(&vcpu->async_pf.lock);
219	first = list_empty(&vcpu->async_pf.done);
220	list_add_tail(&work->link, &vcpu->async_pf.done);
221	spin_unlock(&vcpu->async_pf.lock);
222
223	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
224		kvm_arch_async_page_present_queued(vcpu);
225
226	vcpu->async_pf.queued++;
227	return 0;
228}
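
Note on the difference between the two versions shown above: in v6.2 each work item pins its own resources at setup time (work->mm = current->mm with mmget(), plus kvm_get_kvm()), and async_pf_execute() and the cancellation path in kvm_clear_async_pf_completion_queue() drop those references with mmput() and kvm_put_kvm(). In v6.9.4 the per-item references are gone: async_pf_execute() only pins the mm around gup() with mmget_not_zero(), relying on KVM itself holding the mm until kvm_destroy_vm(), and kvm_flush_and_free_async_pf_work() flushes the work item before freeing it so that no KVM module code is still running when the last module reference is dropped.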