1/* SPDX-License-Identifier: GPL-2.0-only */
2
3#ifndef __KVM_MM_H__
4#define __KVM_MM_H__ 1
5
/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */
12
13#ifdef KVM_HAVE_MMU_RWLOCK
14#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
15#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
16#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
17#else
18#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
19#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
20#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
21#endif /* KVM_HAVE_MMU_RWLOCK */
22
23kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
24 bool *async, bool write_fault, bool *writable);
25
26#ifdef CONFIG_HAVE_KVM_PFNCACHE
27void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
28 unsigned long start,
29 unsigned long end,
30 bool may_block);
31#else
/*
 * Stub used when CONFIG_HAVE_KVM_PFNCACHE is not set: no pfn caches can
 * exist, so there is nothing to invalidate and the hook is a no-op.
 */
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end,
						     bool may_block)
{
}
38#endif /* HAVE_KVM_PFNCACHE */
39
40#ifdef CONFIG_KVM_PRIVATE_MEM
41void kvm_gmem_init(struct module *module);
42int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
43int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
44 unsigned int fd, loff_t offset);
45void kvm_gmem_unbind(struct kvm_memory_slot *slot);
46#else
/* Nothing to set up when CONFIG_KVM_PRIVATE_MEM is not enabled. */
static inline void kvm_gmem_init(struct module *module)
{
}
51
/*
 * Stub for !CONFIG_KVM_PRIVATE_MEM.  A guest_memfd can never be created
 * in this configuration, so reaching this is a KVM bug: warn once and
 * fail the bind with -EIO.
 */
static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}
59
/*
 * Stub for !CONFIG_KVM_PRIVATE_MEM.  No slot can ever have been bound
 * to a guest_memfd in this configuration; warn once if reached.
 */
static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
64#endif /* CONFIG_KVM_PRIVATE_MEM */
65
66#endif /* __KVM_MM_H__ */
1/* SPDX-License-Identifier: GPL-2.0-only */
2
3#ifndef __KVM_MM_H__
4#define __KVM_MM_H__ 1
5
/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */
12
13#ifdef KVM_HAVE_MMU_RWLOCK
14#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
15#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
16#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
17#else
18#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
19#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
20#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
21#endif /* KVM_HAVE_MMU_RWLOCK */
22
23kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
24 bool *async, bool write_fault, bool *writable);
25
26#ifdef CONFIG_HAVE_KVM_PFNCACHE
27void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
28 unsigned long start,
29 unsigned long end);
30#else
/*
 * Stub used when CONFIG_HAVE_KVM_PFNCACHE is not set: no pfn caches can
 * exist, so there is nothing to invalidate and the hook is a no-op.
 *
 * NOTE(review): this definition sits inside what looks like a second,
 * older copy of the header appended after the first one (the
 * __KVM_MM_H__ guard is repeated above, and this variant lacks the
 * may_block parameter the earlier copy has).  The duplicate is dead
 * text at compile time because the guard is already defined -- confirm
 * and drop the duplicated copy.
 */
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end)
{
}
36#endif /* HAVE_KVM_PFNCACHE */
37
38#ifdef CONFIG_KVM_PRIVATE_MEM
39void kvm_gmem_init(struct module *module);
40int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
41int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
42 unsigned int fd, loff_t offset);
43void kvm_gmem_unbind(struct kvm_memory_slot *slot);
44#else
/* Stub for !CONFIG_KVM_PRIVATE_MEM: no guest_memfd state to initialize. */
static inline void kvm_gmem_init(struct module *module)
{

}
49
/*
 * Stub for !CONFIG_KVM_PRIVATE_MEM.  A guest_memfd can never be created
 * in this configuration, so reaching this is a KVM bug: warn once and
 * fail the bind with -EIO.
 */
static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}
57
/*
 * Stub for !CONFIG_KVM_PRIVATE_MEM.  No slot can ever have been bound
 * to a guest_memfd in this configuration; warn once if reached.
 */
static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
62#endif /* CONFIG_KVM_PRIVATE_MEM */
63
64#endif /* __KVM_MM_H__ */