/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay     <yaniv@qumranet.com>
 *   Avi Kivity      <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong  <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

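/*
 * Note: this file was never built standalone. In the kernels that carried
 * it (it is gone from modern trees), mmu.c #includes mmu_audit.c when
 * CONFIG_KVM_MMU_AUDIT is set, so the MMU helpers used below
 * (page_header(), is_shadow_present_pte(), ...) and the AUDIT_*
 * audit-point constants are already in scope.
 */
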
char const *audit_point_name[] = {
        "pre page fault",
        "post page fault",
        "pre pte write",
        "post pte write",
        "pre sync",
        "post sync"
};

#define audit_printk(kvm, fmt, args...)         \
        printk(KERN_ERR "audit: (%s) error: "   \
                fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

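/*
 * Recursively apply @fn to every SPTE in the subtree rooted at @sp,
 * descending from @level down to the leaf page tables.
 */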
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            inspect_spte_fn fn, int level)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 *ent = sp->spt;

                fn(vcpu, ent + i, level);

                if (is_shadow_present_pte(ent[i]) &&
                    !is_last_spte(ent[i], level)) {
                        struct kvm_mmu_page *child;

                        child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
}

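/*
 * Walk the vcpu's current shadow page tables. A 4-level 64-bit root is
 * walked from its root page; otherwise each valid pae_root entry is
 * walked separately, starting one level down.
 */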
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
        int i;
        struct kvm_mmu_page *sp;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                return;
        }

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

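/* Apply @fn to every shadow page on the VM's active list. */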
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
                fn(kvm, sp);
}

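/*
 * Check that a present leaf SPTE points at the host page that currently
 * backs its gfn; an unsync sp is only legal at the last level.
 */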
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        pfn_t pfn;
        hpa_t hpa;

        sp = page_header(__pa(sptep));

        if (sp->unsync) {
                if (level != PT_PAGE_TABLE_LEVEL) {
                        audit_printk(vcpu->kvm, "unsync sp: %p "
                                     "level = %d\n", sp, level);
                        return;
                }
        }

        if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
                return;

        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return;
        }

        hpa = pfn << PAGE_SHIFT;
        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
                audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
                             "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
                             hpa, *sptep);
}

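/*
 * Every rmap-tracked SPTE must be reachable from the rmap of the gfn it
 * maps; complain (rate-limited) if the gfn has no memslot or no rmap entry.
 */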
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
        unsigned long *rmapp;
        struct kvm_mmu_page *rev_sp;
        gfn_t gfn;

        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

        if (!gfn_to_memslot(kvm, gfn)) {
                if (!__ratelimit(&ratelimit_state))
                        return;
                audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
                audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
                             (long int)(sptep - rev_sp->spt), rev_sp->gfn);
                dump_stack();
                return;
        }

        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
        if (!*rmapp) {
                if (!__ratelimit(&ratelimit_state))
                        return;
                audit_printk(kvm, "no rmap for writable spte %llx\n",
                             *sptep);
                dump_stack();
        }
}

static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
                inspect_spte_has_rmap(vcpu->kvm, sptep);
}

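/* After a root sync (AUDIT_POST_SYNC) no reachable sp may still be unsync. */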
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
                audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
                             "root.\n", sp);
}

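/* Run the rmap check on every rmap-tracked SPTE of a last-level sp. */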
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int i;

        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!is_rmap_spte(sp->spt[i]))
                        continue;

                inspect_spte_has_rmap(kvm, sp->spt + i);
        }
}

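/*
 * An indirect, synced, valid sp shadows a guest page table, so the guest
 * frame it shadows must be write-protected: no writable SPTE may map it.
 */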
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        u64 *sptep;
        struct rmap_iterator iter;

        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;

        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
                if (is_writable_pte(*sptep))
                        audit_printk(kvm, "shadow page has writable "
                                     "mappings: gfn %llx role %x\n",
                                     sp->gfn, sp->role.word);
        }
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        check_mappings_rmap(kvm, sp);
        audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
        walk_all_active_sps(kvm, audit_sp);
}

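/* All per-SPTE checks, combined into a single pass over the vcpu's tables. */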
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        audit_sptes_have_rmaps(vcpu, sptep, level);
        audit_mappings(vcpu, sptep, level);
        audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
        mmu_spte_walk(vcpu, audit_spte);
}

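/*
 * mmu_audit_key gates kvm_mmu_audit() through a static branch, so the
 * audit hooks cost (nearly) nothing while auditing is disabled.
 */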
static bool mmu_audit;
static struct static_key mmu_audit_key;

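/* One full audit pass, rate-limited to at most 10 runs per 5 seconds. */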
static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!__ratelimit(&ratelimit_state))
                return;

        vcpu->kvm->arch.audit_point = point;
        audit_all_active_sps(vcpu->kvm);
        audit_vcpu_spte(vcpu);
}

static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
        if (static_key_false(&mmu_audit_key))
                __kvm_mmu_audit(vcpu, point);
}

static void mmu_audit_enable(void)
{
        if (mmu_audit)
                return;

        static_key_slow_inc(&mmu_audit_key);
        mmu_audit = true;
}

static void mmu_audit_disable(void)
{
        if (!mmu_audit)
                return;

        static_key_slow_dec(&mmu_audit_key);
        mmu_audit = false;
}

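/*
 * "mmu_audit" module parameter: writing 1 enables auditing, 0 disables
 * it. Assuming this file is built into kvm.ko as in mainline, the knob
 * appears as /sys/module/kvm/parameters/mmu_audit.
 */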
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
        int ret;
        unsigned long enable;

        ret = strict_strtoul(val, 10, &enable);
        if (ret < 0)
                return -EINVAL;

        switch (enable) {
        case 0:
                mmu_audit_disable();
                break;
        case 1:
                mmu_audit_enable();
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);