/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

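/*
 * ERSN() pairs a KVM_EXIT_* code with its name so that kvm_trace_exit_reason
 * can be handed to __print_symbolic() in the kvm_userspace_exit event below.
 */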
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),		\
	ERSN(HYPERV)

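/*
 * Logged when a vcpu returns to userspace.  A negative errno (e.g. -EINTR)
 * selects the "restart"/"error" form of the message instead of the symbolic
 * exit reason.
 */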
TRACE_EVENT(kvm_userspace_exit,
	TP_PROTO(__u32 reason, int errno),
	TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field( __u32, reason )
		__field( int, errno )
	),

	TP_fast_assign(
		__entry->reason = reason;
		__entry->errno = errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

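/*
 * Reports how long a blocked vcpu was out and whether it actually waited
 * (scheduled out, "wait") or was satisfied by halt polling ("poll"); the
 * third argument, printed as "polling valid/invalid", records whether the
 * wakeup source is treated as a valid one (presumably feeding the
 * halt-polling heuristics in the callers).
 */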
TRACE_EVENT(kvm_vcpu_wakeup,
	TP_PROTO(__u64 ns, bool waited, bool valid),
	TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field( __u64, ns )
		__field( bool, waited )
		__field( bool, valid )
	),

	TP_fast_assign(
		__entry->ns = ns;
		__entry->waited = waited;
		__entry->valid = valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);

#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field( unsigned int, gsi )
		__field( int, level )
		__field( int, irq_source_id )
	),

	TP_fast_assign(
		__entry->gsi = gsi;
		__entry->level = level;
		__entry->irq_source_id = irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
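/*
 * APIC delivery-mode field (bits 10:8 of an IOAPIC redirection entry or of
 * the MSI data word), decoded for the printk output of the events below.
 */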
#define kvm_deliver_mode	\
	{0x0, "Fixed"},		\
	{0x1, "LowPrio"},	\
	{0x2, "SMI"},		\
	{0x3, "Res3"},		\
	{0x4, "NMI"},		\
	{0x5, "INIT"},		\
	{0x6, "SIPI"},		\
	{0x7, "ExtINT"}

TRACE_EVENT(kvm_ioapic_set_irq,
	TP_PROTO(__u64 e, int pin, bool coalesced),
	TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field( __u64, e )
		__field( int, pin )
		__field( bool, coalesced )
	),

	TP_fast_assign(
		__entry->e = e;
		__entry->pin = pin;
		__entry->coalesced = coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	TP_PROTO(__u64 e),
	TP_ARGS(e),

	TP_STRUCT__entry(
		__field( __u64, e )
	),

	TP_fast_assign(
		__entry->e = e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

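/*
 * kvm_msi_set_irq decodes the MSI address/data pair: the destination is
 * assembled from address bits 19:12 plus the extended-destination bits in
 * the upper address word, the vector comes from the low data byte, and the
 * delivery mode, destination mode, trigger mode and redirection-hint bits
 * are printed symbolically.
 */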
TRACE_EVENT(kvm_msi_set_irq,
	TP_PROTO(__u64 address, __u64 data),
	TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field( __u64, address )
		__field( __u64, data )
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->data = data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER, "PIC master"},			\
	{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"},			\
	{KVM_IRQCHIP_IOAPIC, "IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)

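/*
 * When kvm_irqchips is defined above, kvm_ack_irq prints the irqchip name
 * symbolically; otherwise it falls back to the raw irqchip number.
 */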
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm __entry->irqchip, __entry->pin
#endif

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field( unsigned int, irqchip )
		__field( unsigned int, pin )
	),

	TP_fast_assign(
		__entry->irqchip = irqchip;
		__entry->pin = pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */


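/*
 * MMIO trace types.  An "unsatisfied-read" is one whose value is not yet
 * known (typically a read that still has to be completed in userspace),
 * which is why kvm_mmio tolerates a NULL val pointer.
 */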
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field( u32, type )
		__field( u32, len )
		__field( u64, gpa )
		__field( u64, val )
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->len = len;
		__entry->gpa = gpa;
		__entry->val = 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

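/* Guest FPU state being loaded onto, or unloaded from, the host CPU. */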
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field( u32, load )
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

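/*
 * Page-aging notifier: the host virtual address is reconstructed from the
 * memslot, and "YOUNG"/"OLD" reports whether the gfn was referenced since
 * the last aging pass.
 */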
TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),

	TP_STRUCT__entry(
		__field( u64, hva )
		__field( u64, gfn )
		__field( u8, level )
		__field( u8, referenced )
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->level = level;
		__entry->hva = ((gfn - slot->base_gfn) <<
				PAGE_SHIFT) + slot->userspace_addr;
		__entry->referenced = ref;
	),

	TP_printk("hva %llx gfn %llx level %u %s",
		  __entry->hva, __entry->gfn, __entry->level,
		  __entry->referenced ? "YOUNG" : "OLD")
);

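/*
 * Async page fault events.  Roughly: a "not present" fault is reported while
 * the page is brought in asynchronously, and a "ready" event follows once it
 * is available; the token pairs the two notifications.
 */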
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif

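/*
 * Records adjustments to a vcpu's halt-polling window; the grow/shrink
 * wrapper macros below supply the direction flag.
 */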
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow = grow;
		__entry->vcpu_id = vcpu_id;
		__entry->new = new;
		__entry->old = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
		  __entry->vcpu_id,
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>