/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or a spinlock
 * for the mmu_lock.  These macros, for use in common code only,
 * avoid #ifdefs in places that must work with multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)	rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	write_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)	spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
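
/*
 * Illustrative sketch only (kept under "#if 0" so it is never compiled):
 * common code takes and drops mmu_lock through the wrappers above, so the
 * same statements work whether the architecture selected a spinlock or an
 * rwlock.  The function name below is made up for the example.
 */
#if 0
static void example_mmu_critical_section(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);
	/* ... walk or update the guest's stage-2 / shadow page tables ... */
	KVM_MMU_UNLOCK(kvm);
}
#endif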
struct kvm_follow_pfn {
	const struct kvm_memory_slot *slot;
	const gfn_t gfn;

	unsigned long hva;

	/* FOLL_* flags modifying lookup behavior, e.g. FOLL_WRITE. */
	unsigned int flags;

	/*
	 * Pin the page (effectively FOLL_PIN, which is an mm/ internal flag).
	 * The page *must* be pinned if KVM will write to the page via a
	 * kernel mapping, e.g. via kmap(), mremap(), etc.
	 */
	bool pin;

	/*
	 * If non-NULL, try to get a writable mapping even for a read fault.
	 * Set to true if a writable mapping was obtained.
	 */
	bool *map_writable;

	/*
	 * Optional output.  Set to a valid "struct page" if the returned pfn
	 * is for a refcounted or pinned struct page, NULL if the returned pfn
	 * has no struct page or if the struct page is not being refcounted
	 * (e.g. tail pages of non-compound higher order allocations from
	 * IO/PFNMAP mappings).
	 */
	struct page **refcounted_page;
};

kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);
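
/*
 * Illustrative sketch only (kept under "#if 0" so it is never compiled):
 * a caller fills in a kvm_follow_pfn on the stack and hands it to
 * hva_to_pfn().  The helper name is made up; the slot/gfn/write arguments
 * are assumed to come from the caller's fault context, and real call
 * sites live in common KVM code rather than in this header.
 */
#if 0
static kvm_pfn_t example_fault_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
				       bool write_fault, bool *writable,
				       struct page **refcounted_page)
{
	struct kvm_follow_pfn kfp = {
		.slot = slot,
		.gfn = gfn,
		.hva = gfn_to_hva_memslot(slot, gfn),
		.flags = write_fault ? FOLL_WRITE : 0,
		.map_writable = writable,
		.refcounted_page = refcounted_page,
	};

	return hva_to_pfn(&kfp);
}
#endif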

#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */

#ifdef CONFIG_KVM_PRIVATE_MEM
void kvm_gmem_init(struct module *module);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset);
void kvm_gmem_unbind(struct kvm_memory_slot *slot);
#else
static inline void kvm_gmem_init(struct module *module)
{
}

static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}

static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
#endif /* CONFIG_KVM_PRIVATE_MEM */
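
/*
 * Illustrative flow only: when userspace registers a memslot with the
 * KVM_MEM_GUEST_MEMFD flag, common code is expected to bind the slot to
 * its guest_memfd instance, roughly
 *
 *	r = kvm_gmem_bind(kvm, slot, mem->guest_memfd, mem->guest_memfd_offset);
 *
 * (field names as in struct kvm_userspace_memory_region2), and to call
 * kvm_gmem_unbind(slot) when the slot is deleted.
 */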

#endif /* __KVM_MM_H__ */