/* KVM core tracepoint definitions (two revisions of this header follow). */
/*
 * Tracepoints for the generic (arch-independent) KVM code.
 *
 * Non-standard include guard: trace/define_trace.h re-reads this header
 * with TRACE_HEADER_MULTI_READ defined, so the guard must permit a
 * second pass.
 */
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* Pair a KVM_EXIT_* value with its name, for use with __print_symbolic(). */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/* Exit-reason table decoded by trace_kvm_userspace_exit(). */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
17
/*
 * A vcpu returned from KVM_RUN to userspace: 'reason' is the KVM_EXIT_*
 * code, 'errno' the (negative) kernel error, if any.  A negative errno
 * is printed as "restart" (-EINTR) or "error" instead of the reason.
 */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason	)
		__field(	int,		errno	)
	),

	TP_fast_assign(
		__entry->reason	= reason;
		__entry->errno	= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);
38
#if defined(__KVM_HAVE_IOAPIC)
/* An interrupt line (GSI) was driven to 'level' by the given source id. */
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);

/* APIC delivery-mode names (bits 10:8 of a redirection/MSI entry). */
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

/*
 * An IOAPIC pin fired: 'e' is the raw 64-bit redirection-table entry,
 * decoded field by field in TP_printk (dest = bits 63:56, vector =
 * bits 7:0, delivery mode = bits 10:8, plus the logical/level/mask
 * flag bits).  'coalesced' is reported verbatim in the output.
 */
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);
94
/*
 * An MSI was delivered: 'address'/'data' are the raw MSI message, with
 * destination id, vector, delivery mode and flags decoded in TP_printk.
 */
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address	)
		__field(	__u64,		data	)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %u vec %x (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12), (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

/* Names for the KVM_IRQCHIP_* chip ids, for __print_symbolic(). */
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

/* An interrupt was acked on the given irqchip/pin. */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip	)
		__field(	unsigned int,	pin	)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk("irqchip %s pin %u",
		  __print_symbolic(__entry->irqchip, kvm_irqchips),
		  __entry->pin)
);



#endif /* defined(__KVM_HAVE_IOAPIC) */
144
/* Values for the 'type' argument of trace_kvm_mmio(). */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

/*
 * An MMIO access of 'len' bytes at guest physical address 'gpa';
 * 'val' carries the data word being read or written.
 */
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type	)
		__field(	u32,	len	)
		__field(	u64,	gpa	)
		__field(	u64,	val	)
	),

	TP_fast_assign(
		__entry->type	= type;
		__entry->len	= len;
		__entry->gpa	= gpa;
		__entry->val	= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
176
/* 0 = unload, 1 = load; decoded by trace_kvm_fpu(). */
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

/* Guest FPU state was loaded or unloaded. */
TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	load	)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

/*
 * MMU-notifier page aging: the gfn is derived from the hva's offset
 * within the memslot; 'referenced' reports YOUNG vs OLD.
 */
TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(hva, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->hva		= hva;
		__entry->gfn		=
		  slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx %s",
		  __entry->hva, __entry->gfn,
		  __entry->referenced ? "YOUNG" : "OLD")
);
217
#ifdef CONFIG_KVM_ASYNC_PF
/*
 * Common shape for async-pf events that record a guest virtual address
 * and the guest frame number involved.
 */
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/*
 * Common shape for the not-present/ready pair: 'token' tags the
 * outstanding async fault, 'gva' is the faulting guest virtual address.
 */
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);
285
/*
 * An async page fault completed; pfn is taken from 'page' when one was
 * installed, 0 otherwise.
 */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, struct page *page, u64 gva),
	TP_ARGS(address, page, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(pfn_t, pfn)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->pfn = page ? page_to_pfn(page) : 0;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
		  __entry->address, __entry->pfn)
);

#endif

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tracepoints for the generic (arch-independent) KVM code.
 *
 * Non-standard include guard: trace/define_trace.h re-reads this header
 * with TRACE_HEADER_MULTI_READ defined, so the guard must permit a
 * second pass.
 */
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* Pair a KVM_EXIT_* value with its name, for use with __print_symbolic(). */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/* Exit-reason table decoded by trace_kvm_userspace_exit(). */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),		\
	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
21
/*
 * A vcpu returned from KVM_RUN to userspace: 'reason' is the KVM_EXIT_*
 * code, 'errno' the (negative) kernel error, if any.  A negative errno
 * is printed as "restart" (-EINTR) or "error" instead of the reason.
 */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason	)
		__field(	int,		errno	)
	),

	TP_fast_assign(
		__entry->reason	= reason;
		__entry->errno	= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

/*
 * A halted vcpu woke up after 'ns' nanoseconds; 'waited' distinguishes a
 * real block ("wait") from successful halt-polling ("poll"), and 'valid'
 * is reported as "polling valid"/"polling invalid".
 */
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns	)
		__field(	bool,		waited	)
		__field(	bool,		valid	)
	),

	TP_fast_assign(
		__entry->ns	= ns;
		__entry->waited	= waited;
		__entry->valid	= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
64
#if defined(CONFIG_HAVE_KVM_IRQFD)
/* An interrupt line (GSI) was driven to 'level' by the given source id. */
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
/* APIC delivery-mode names (bits 10:8 of a redirection/MSI entry). */
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

/*
 * An IOAPIC pin fired: 'e' is the raw 64-bit redirection-table entry,
 * decoded field by field in TP_printk (dest = bits 63:56, vector =
 * bits 7:0, delivery mode = bits 10:8, plus the logical/level/mask
 * flag bits).  'coalesced' is reported verbatim in the output.
 */
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);
122
/*
 * Delayed-EOI injection of an IOAPIC entry; 'e' is the raw 64-bit
 * redirection-table entry, decoded the same way as kvm_ioapic_set_irq.
 */
TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e	)
	),

	TP_fast_assign(
		__entry->e	= e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

/*
 * An MSI was delivered: 'address'/'data' are the raw MSI message.  The
 * printed destination combines the low id (address bits 19:12) with the
 * extended-destination bits taken from the upper address word.
 */
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address	)
		__field(	__u64,		data	)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);
165
/* Names for the KVM_IRQCHIP_* chip ids, for __print_symbolic(). */
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)

/*
 * kvm_irqchips is only defined in the __KVM_HAVE_IOAPIC case above;
 * fall back to printing the raw irqchip number when it is absent.
 */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

/* An interrupt was acked on the given irqchip/pin. */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip	)
		__field(	unsigned int,	pin	)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
201
202
203
/* Values for the 'type' argument of trace_kvm_mmio(). */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

/*
 * An MMIO access of 'len' bytes at guest physical address 'gpa'.  Up to
 * sizeof(u64) bytes of the payload pointed to by 'val' are captured;
 * a NULL 'val' leaves the recorded value 0.
 */
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type	)
		__field(	u32,	len	)
		__field(	u64,	gpa	)
		__field(	u64,	val	)
	),

	TP_fast_assign(
		__entry->type	= type;
		__entry->len	= len;
		__entry->gpa	= gpa;
		__entry->val	= 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
238
/* 0 = unload, 1 = load; decoded by trace_kvm_fpu(). */
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

/* Guest FPU state was loaded or unloaded. */
TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	load	)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
257
#ifdef CONFIG_KVM_ASYNC_PF
/*
 * Common shape for async-pf events that record a guest virtual address
 * and the guest frame number involved.
 */
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/*
 * Common shape for the not-present/ready pair: 'token' tags the
 * outstanding async fault, 'gva' is the faulting guest virtual address.
 */
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* An async page fault completed for host address/guest virtual address. */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif
346
/*
 * A vcpu's halt-polling window was retuned from 'old' to 'new' ns;
 * 'grow' selects the grow vs shrink direction in the output.
 */
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow		= grow;
		__entry->vcpu_id	= vcpu_id;
		__entry->new		= new;
		__entry->old		= old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
		  __entry->vcpu_id,
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

/* Convenience wrappers fixing the 'grow' direction. */
#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
377
/*
 * A dirty-gfn entry (slot, offset) was pushed onto a vcpu's dirty ring.
 * The printed "used" count is dirty_index - reset_index.
 */
TRACE_EVENT(kvm_dirty_ring_push,
	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
	TP_ARGS(ring, slot, offset),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
		__field(u32, slot)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->index		= ring->index;
		__entry->dirty_index	= ring->dirty_index;
		__entry->reset_index	= ring->reset_index;
		__entry->slot		= slot;
		__entry->offset		= offset;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x "
		  "slot %u offset 0x%llx (used %u)",
		  __entry->index, __entry->dirty_index,
		  __entry->reset_index, __entry->slot, __entry->offset,
		  __entry->dirty_index - __entry->reset_index)
);

/* Snapshot of a dirty ring's indices when it is reset (harvested). */
TRACE_EVENT(kvm_dirty_ring_reset,
	TP_PROTO(struct kvm_dirty_ring *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
	),

	TP_fast_assign(
		__entry->index		= ring->index;
		__entry->dirty_index	= ring->dirty_index;
		__entry->reset_index	= ring->reset_index;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
		  __entry->index, __entry->dirty_index, __entry->reset_index,
		  __entry->dirty_index - __entry->reset_index)
);

/* A dirty-ring-related vcpu exit; only the vcpu id is recorded. */
TRACE_EVENT(kvm_dirty_ring_exit,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
	),

	TP_printk("vcpu %d", __entry->vcpu_id)
);
440
/* MMU notifier: a host virtual address range is being unmapped. */
TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start	)
		__field(	unsigned long,	end	)
	),

	TP_fast_assign(
		__entry->start	= start;
		__entry->end	= end;
	),

	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);

/* MMU notifier: a pte changed for the given host virtual address. */
TRACE_EVENT(kvm_set_spte_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva	)
	),

	TP_fast_assign(
		__entry->hva	= hva;
	),

	TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);

/* MMU notifier: aging pass over a host virtual address range. */
TRACE_EVENT(kvm_age_hva,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start	)
		__field(	unsigned long,	end	)
	),

	TP_fast_assign(
		__entry->start	= start;
		__entry->end	= end;
	),

	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);

/* MMU notifier: test-age query for a single host virtual address. */
TRACE_EVENT(kvm_test_age_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva	)
	),

	TP_fast_assign(
		__entry->hva	= hva;
	),

	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>