virt/kvm/async_pf.c at v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * kvm asynchronous fault support
  4 *
  5 * Copyright 2010 Red Hat, Inc.
  6 *
  7 * Author:
  8 *      Gleb Natapov <gleb@redhat.com>
  9 */
 10
 11#include <linux/kvm_host.h>
 12#include <linux/slab.h>
 13#include <linux/module.h>
 14#include <linux/mmu_context.h>
 15#include <linux/sched/mm.h>
 16
 17#include "async_pf.h"
 18#include <trace/events/kvm.h>
 19
 20static struct kmem_cache *async_pf_cache;
 21
 22int kvm_async_pf_init(void)
 23{
 24	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
 25
 26	if (!async_pf_cache)
 27		return -ENOMEM;
 28
 29	return 0;
 30}
 31
 32void kvm_async_pf_deinit(void)
 33{
 34	kmem_cache_destroy(async_pf_cache);
 35	async_pf_cache = NULL;
 36}
 37
 38void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 39{
 40	INIT_LIST_HEAD(&vcpu->async_pf.done);
 41	INIT_LIST_HEAD(&vcpu->async_pf.queue);
 42	spin_lock_init(&vcpu->async_pf.lock);
 43}
 44
 45static void async_pf_execute(struct work_struct *work)
 46{
 47	struct kvm_async_pf *apf =
  48		container_of(work, struct kvm_async_pf, work);
 49	struct kvm_vcpu *vcpu = apf->vcpu;
 50	struct mm_struct *mm = vcpu->kvm->mm;
 51	unsigned long addr = apf->addr;
 52	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 53	int locked = 1;
 54	bool first;
 55
 56	might_sleep();
 57
 58	/*
 59	 * Attempt to pin the VM's host address space, and simply skip gup() if
 60	 * acquiring a pin fails, i.e. if the process is exiting.  Note, KVM
 61	 * holds a reference to its associated mm_struct until the very end of
 62	 * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
 63	 * work item is fully processed.
 64	 */
 65	if (mmget_not_zero(mm)) {
 66		mmap_read_lock(mm);
 67		get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
 68		if (locked)
 69			mmap_read_unlock(mm);
 70		mmput(mm);
 71	}
 72
 73	/*
 74	 * Notify and kick the vCPU even if faulting in the page failed, e.g.
 75	 * so that the vCPU can retry the fault synchronously.
 76	 */
 77	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
 78		kvm_arch_async_page_present(vcpu, apf);
 79
 80	spin_lock(&vcpu->async_pf.lock);
 81	first = list_empty(&vcpu->async_pf.done);
 82	list_add_tail(&apf->link, &vcpu->async_pf.done);
 83	spin_unlock(&vcpu->async_pf.lock);
 84
 85	/*
 86	 * The apf struct may be freed by kvm_check_async_pf_completion() as
 87	 * soon as the lock is dropped.  Nullify it to prevent improper usage.
 88	 */
 89	apf = NULL;
 90
 91	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
 92		kvm_arch_async_page_present_queued(vcpu);
 93
 94	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 95
 96	__kvm_vcpu_wake_up(vcpu);
 97}
 98
 99static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
100{
101	/*
102	 * The async #PF is "done", but KVM must wait for the work item itself,
103	 * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
104	 * KVM must ensure *no* code owned by the KVM module can be run
105	 * after the last call to module_put().  Note, flushing the work item
106	 * is always required when the item is taken off the completion queue.
107	 * E.g. even if the vCPU handles the item in the "normal" path, the VM
108	 * could be terminated before async_pf_execute() completes.
109	 *
110	 * Wake-all events skip the queue and go straight to "done", i.e. they don't
111	 * need to be flushed (but sanity check that the work wasn't queued).
112	 */
113	if (work->wakeup_all)
114		WARN_ON_ONCE(work->work.func);
115	else
116		flush_work(&work->work);
117	kmem_cache_free(async_pf_cache, work);
118}
119
120void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
121{
122	/* cancel outstanding work queue item */
123	while (!list_empty(&vcpu->async_pf.queue)) {
124		struct kvm_async_pf *work =
125			list_first_entry(&vcpu->async_pf.queue,
126					 typeof(*work), queue);
127		list_del(&work->queue);
128
129#ifdef CONFIG_KVM_ASYNC_PF_SYNC
130		flush_work(&work->work);
131#else
132		if (cancel_work_sync(&work->work))
133			kmem_cache_free(async_pf_cache, work);
134#endif
135	}
136
137	spin_lock(&vcpu->async_pf.lock);
138	while (!list_empty(&vcpu->async_pf.done)) {
139		struct kvm_async_pf *work =
140			list_first_entry(&vcpu->async_pf.done,
141					 typeof(*work), link);
142		list_del(&work->link);
143
144		spin_unlock(&vcpu->async_pf.lock);
145		kvm_flush_and_free_async_pf_work(work);
146		spin_lock(&vcpu->async_pf.lock);
147	}
148	spin_unlock(&vcpu->async_pf.lock);
149
150	vcpu->async_pf.queued = 0;
151}
152
153void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
154{
155	struct kvm_async_pf *work;
156
157	while (!list_empty_careful(&vcpu->async_pf.done) &&
158	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
159		spin_lock(&vcpu->async_pf.lock);
160		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
161					      link);
162		list_del(&work->link);
163		spin_unlock(&vcpu->async_pf.lock);
164
165		kvm_arch_async_page_ready(vcpu, work);
166		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
167			kvm_arch_async_page_present(vcpu, work);
168
169		list_del(&work->queue);
170		vcpu->async_pf.queued--;
171		kvm_flush_and_free_async_pf_work(work);
172	}
173}
174
175/*
176 * Try to schedule a job to handle the page fault asynchronously. Returns 'true' on
177 * success, 'false' on failure (page fault has to be handled synchronously).
178 */
179bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
180			unsigned long hva, struct kvm_arch_async_pf *arch)
181{
182	struct kvm_async_pf *work;
183
184	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
185		return false;
186
187	/* Arch specific code should not do async PF in this case */
188	if (unlikely(kvm_is_error_hva(hva)))
189		return false;
190
191	/*
192	 * Do the allocation nowait since, if we are going to sleep anyway, we
193	 * may as well sleep faulting in the page.
194	 */
195	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
196	if (!work)
197		return false;
198
199	work->wakeup_all = false;
200	work->vcpu = vcpu;
201	work->cr2_or_gpa = cr2_or_gpa;
202	work->addr = hva;
203	work->arch = *arch;
 
 
 
204
205	INIT_WORK(&work->work, async_pf_execute);
206
207	list_add_tail(&work->queue, &vcpu->async_pf.queue);
208	vcpu->async_pf.queued++;
209	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
210
211	schedule_work(&work->work);
212
213	return true;
214}
215
216int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
217{
218	struct kvm_async_pf *work;
219	bool first;
220
221	if (!list_empty_careful(&vcpu->async_pf.done))
222		return 0;
223
224	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
225	if (!work)
226		return -ENOMEM;
227
228	work->wakeup_all = true;
229	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
230
231	spin_lock(&vcpu->async_pf.lock);
232	first = list_empty(&vcpu->async_pf.done);
233	list_add_tail(&work->link, &vcpu->async_pf.done);
234	spin_unlock(&vcpu->async_pf.lock);
235
236	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
237		kvm_arch_async_page_present_queued(vcpu);
238
239	vcpu->async_pf.queued++;
240	return 0;
241}
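
For orientation before the older version below: the file above is the generic half of KVM's asynchronous page fault machinery. Architecture code queues a fault with kvm_setup_async_pf() when the backing host page is not resident, the work item (async_pf_execute()) faults the page in via get_user_pages_remote(), and the vCPU's run loop later drains completed items with kvm_check_async_pf_completion(). The sketch below only illustrates that calling pattern; handle_guest_fault() and vcpu_run_once() are hypothetical wrappers, not functions from this file or from any arch port, and the struct kvm_arch_async_pf initializer is left empty because its contents are architecture-specific.

#include <linux/kvm_host.h>

/*
 * Hedged sketch of how arch code typically drives the API above; only the
 * kvm_setup_async_pf() and kvm_check_async_pf_completion() calls match this
 * file, the surrounding functions are made up for illustration.
 */
static int handle_guest_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			      unsigned long hva, bool host_page_resident)
{
	struct kvm_arch_async_pf arch = {};	/* arch-specific fault token */

	if (host_page_resident)
		return 0;			/* nothing to do asynchronously */

	/*
	 * Queue the fault: on success the guest is told the page is not
	 * present and may schedule another task while the workqueue faults
	 * the page in.  On failure the caller must fault synchronously.
	 */
	if (kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch))
		return 1;

	return -EFAULT;
}

static void vcpu_run_once(struct kvm_vcpu *vcpu)
{
	/* Deliver any completed async faults before re-entering the guest. */
	kvm_check_async_pf_completion(vcpu);

	/* ... arch-specific guest entry ... */
}
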
virt/kvm/async_pf.c at v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * kvm asynchronous fault support
  4 *
  5 * Copyright 2010 Red Hat, Inc.
  6 *
  7 * Author:
  8 *      Gleb Natapov <gleb@redhat.com>
  9 */
 10
 11#include <linux/kvm_host.h>
 12#include <linux/slab.h>
 13#include <linux/module.h>
 14#include <linux/mmu_context.h>
 15#include <linux/sched/mm.h>
 16
 17#include "async_pf.h"
 18#include <trace/events/kvm.h>
 19
 20static struct kmem_cache *async_pf_cache;
 21
 22int kvm_async_pf_init(void)
 23{
 24	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
 25
 26	if (!async_pf_cache)
 27		return -ENOMEM;
 28
 29	return 0;
 30}
 31
 32void kvm_async_pf_deinit(void)
 33{
 34	kmem_cache_destroy(async_pf_cache);
 35	async_pf_cache = NULL;
 36}
 37
 38void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 39{
 40	INIT_LIST_HEAD(&vcpu->async_pf.done);
 41	INIT_LIST_HEAD(&vcpu->async_pf.queue);
 42	spin_lock_init(&vcpu->async_pf.lock);
 43}
 44
 45static void async_pf_execute(struct work_struct *work)
 46{
 47	struct kvm_async_pf *apf =
 48		container_of(work, struct kvm_async_pf, work);
 49	struct mm_struct *mm = apf->mm;
 50	struct kvm_vcpu *vcpu = apf->vcpu;
 51	unsigned long addr = apf->addr;
 52	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 53	int locked = 1;
 54	bool first;
 55
 56	might_sleep();
 57
 58	/*
 59	 * This work is run asynchronously to the task which owns
 60	 * mm and might be done in another context, so we must
 61	 * access it remotely.
 62	 */
 63	mmap_read_lock(mm);
 64	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
 65	if (locked)
 66		mmap_read_unlock(mm);
 67
 68	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
 69		kvm_arch_async_page_present(vcpu, apf);
 70
 71	spin_lock(&vcpu->async_pf.lock);
 72	first = list_empty(&vcpu->async_pf.done);
 73	list_add_tail(&apf->link, &vcpu->async_pf.done);
 74	apf->vcpu = NULL;
 75	spin_unlock(&vcpu->async_pf.lock);
 76
 77	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
 78		kvm_arch_async_page_present_queued(vcpu);
 79
 80	/*
 81	 * apf may be freed by kvm_check_async_pf_completion() after
 82	 * this point
 83	 */
 84
 85	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 86
 87	__kvm_vcpu_wake_up(vcpu);
 88
 89	mmput(mm);
 90	kvm_put_kvm(vcpu->kvm);
 91}
 92
 93void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 94{
 95	spin_lock(&vcpu->async_pf.lock);
 96
 97	/* cancel outstanding work queue item */
 98	while (!list_empty(&vcpu->async_pf.queue)) {
 99		struct kvm_async_pf *work =
100			list_first_entry(&vcpu->async_pf.queue,
101					 typeof(*work), queue);
102		list_del(&work->queue);
103
104		/*
105		 * We know it's present in vcpu->async_pf.done; do
106		 * nothing here.
107		 */
108		if (!work->vcpu)
109			continue;
110
111		spin_unlock(&vcpu->async_pf.lock);
112#ifdef CONFIG_KVM_ASYNC_PF_SYNC
113		flush_work(&work->work);
114#else
115		if (cancel_work_sync(&work->work)) {
116			mmput(work->mm);
117			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
118			kmem_cache_free(async_pf_cache, work);
119		}
120#endif
121		spin_lock(&vcpu->async_pf.lock);
122	}
123
124	while (!list_empty(&vcpu->async_pf.done)) {
125		struct kvm_async_pf *work =
126			list_first_entry(&vcpu->async_pf.done,
127					 typeof(*work), link);
128		list_del(&work->link);
129		kmem_cache_free(async_pf_cache, work);
130	}
131	spin_unlock(&vcpu->async_pf.lock);
132
133	vcpu->async_pf.queued = 0;
134}
135
136void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
137{
138	struct kvm_async_pf *work;
139
140	while (!list_empty_careful(&vcpu->async_pf.done) &&
141	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
142		spin_lock(&vcpu->async_pf.lock);
143		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
144					      link);
145		list_del(&work->link);
146		spin_unlock(&vcpu->async_pf.lock);
147
148		kvm_arch_async_page_ready(vcpu, work);
149		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
150			kvm_arch_async_page_present(vcpu, work);
151
152		list_del(&work->queue);
153		vcpu->async_pf.queued--;
154		kmem_cache_free(async_pf_cache, work);
155	}
156}
157
158/*
159 * Try to schedule a job to handle the page fault asynchronously. Returns 'true' on
160 * success, 'false' on failure (page fault has to be handled synchronously).
161 */
162bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
163			unsigned long hva, struct kvm_arch_async_pf *arch)
164{
165	struct kvm_async_pf *work;
166
167	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
168		return false;
169
170	/* Arch specific code should not do async PF in this case */
171	if (unlikely(kvm_is_error_hva(hva)))
172		return false;
173
174	/*
175	 * Do the allocation nowait since, if we are going to sleep anyway, we
176	 * may as well sleep faulting in the page.
177	 */
178	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
179	if (!work)
180		return false;
181
182	work->wakeup_all = false;
183	work->vcpu = vcpu;
184	work->cr2_or_gpa = cr2_or_gpa;
185	work->addr = hva;
186	work->arch = *arch;
187	work->mm = current->mm;
188	mmget(work->mm);
189	kvm_get_kvm(work->vcpu->kvm);
190
191	INIT_WORK(&work->work, async_pf_execute);
192
193	list_add_tail(&work->queue, &vcpu->async_pf.queue);
194	vcpu->async_pf.queued++;
195	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
196
197	schedule_work(&work->work);
198
199	return true;
200}
201
202int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
203{
204	struct kvm_async_pf *work;
205	bool first;
206
207	if (!list_empty_careful(&vcpu->async_pf.done))
208		return 0;
209
210	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
211	if (!work)
212		return -ENOMEM;
213
214	work->wakeup_all = true;
215	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
216
217	spin_lock(&vcpu->async_pf.lock);
218	first = list_empty(&vcpu->async_pf.done);
219	list_add_tail(&work->link, &vcpu->async_pf.done);
220	spin_unlock(&vcpu->async_pf.lock);
221
222	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
223		kvm_arch_async_page_present_queued(vcpu);
224
225	vcpu->async_pf.queued++;
226	return 0;
227}
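
The main difference between the two versions shown here is lifetime management. In v6.8, kvm_setup_async_pf() takes a reference on the faulting task's mm (mmget()) and on the VM (kvm_get_kvm()) for every work item, async_pf_execute() drops them with mmput() and kvm_put_kvm(), and work->vcpu is nulled under the lock so the cancellation path can recognize items that already completed. In v6.13.7 the per-item references are gone: the worker uses vcpu->kvm->mm and pins it only temporarily with mmget_not_zero(), relying on KVM holding the mm_struct until kvm_destroy_vm(), and kvm_flush_and_free_async_pf_work() flushes the work item before freeing it so that no module-owned code can still be running after the last module_put(). The snippet below is a minimal, self-contained sketch of that conditional-pin pattern (the v6.13 style); fault_in_one_page() is a hypothetical name, but the mm API calls are the same ones used in async_pf_execute() above.

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Hypothetical helper showing the v6.13-style conditional mm pin. */
static void fault_in_one_page(struct mm_struct *mm, unsigned long addr)
{
	int locked = 1;

	/* Skip entirely if the owning process is already exiting. */
	if (!mmget_not_zero(mm))
		return;

	mmap_read_lock(mm);
	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
	if (locked)	/* gup may drop and release mmap_lock itself */
		mmap_read_unlock(mm);

	mmput(mm);	/* drop the temporary reference */
}
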