1/* SPDX-License-Identifier: GPL-2.0 */
2#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _TRACE_KVM_MAIN_H
4
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvm
9
/* Map a KVM_EXIT_* exit-reason code to its name for __print_symbolic(). */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/*
 * Symbol table used to decode kvm_userspace_exit's "reason" field.
 * Entries must match the KVM_EXIT_* codes from the KVM UAPI header.
 */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),		\
	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
21
/*
 * Traced when a vcpu exits to userspace.  "reason" is a KVM_EXIT_*
 * code, printed symbolically on success.  A negative "errno" marks a
 * failed exit and is reported as "restart" (-EINTR) or "error", with
 * -errno as the numeric argument instead of the reason.
 */
TRACE_EVENT(kvm_userspace_exit,
	TP_PROTO(__u32 reason, int errno),
	TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason = reason;
		__entry->errno = errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);
42
/*
 * Traced when a halted vcpu wakes up.  "ns" is the time spent in the
 * wait or poll, "waited" distinguishes a real blocking wait from a
 * poll, and "valid" is printed as "polling valid/invalid" -- presumably
 * whether the wakeup fell inside the halt-poll window; confirm at the
 * call site.
 */
TRACE_EVENT(kvm_vcpu_wakeup,
	TP_PROTO(__u64 ns, bool waited, bool valid),
	TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns = ns;
		__entry->waited = waited;
		__entry->valid = valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
64
#if defined(CONFIG_HAVE_KVM_IRQCHIP)
/*
 * Traced when an interrupt line is raised or lowered: "gsi" is the
 * global system interrupt number, "level" the new line level, and
 * "irq_source_id" identifies which in-kernel source asserted it.
 */
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi = gsi;
		__entry->level = level;
		__entry->irq_source_id = irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
86
#if defined(__KVM_HAVE_IOAPIC)
/*
 * Names for the 3-bit APIC delivery mode; extracted by the events
 * below as (value >> 8) & 0x7 from the IOAPIC redirection entry or
 * MSI data word.
 */
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}
97
/*
 * Traced when an IOAPIC pin is serviced.  "e" is the 64-bit
 * redirection table entry, decoded in TP_printk as: vector bits 7:0,
 * delivery mode bits 10:8, destination mode bit 11, trigger mode
 * bit 15, mask bit 16, destination bits 63:56.  "coalesced" marks an
 * interrupt merged with one already pending.
 */
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e = e;
		__entry->pin = pin;
		__entry->coalesced = coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);
122
/*
 * Traced when a delayed EOI injection happens for an IOAPIC entry.
 * "e" is the redirection table entry, decoded the same way as in
 * kvm_ioapic_set_irq (vector, delivery mode, dest/trigger/mask bits).
 */
TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e = e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);
142
/*
 * Traced when an MSI is delivered.  TP_printk decodes the MSI address
 * and data words: destination from address bits 19:12 merged with the
 * extended bits taken from address bits 63:32 (masked to 0xffffff00),
 * vector = data bits 7:0, delivery mode = data bits 10:8, destination
 * mode = address bit 2, trigger = data bit 15, redirection hint =
 * address bit 3.
 */
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->data = data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);
165
/* Symbolic names for the emulated irqchips, used by kvm_ack_irq below. */
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */
172
#if defined(CONFIG_HAVE_KVM_IRQCHIP)

/*
 * Pick the kvm_ack_irq output format at preprocessing time: when
 * kvm_irqchips is defined (__KVM_HAVE_IOAPIC above) print the irqchip
 * name symbolically, otherwise fall back to the raw chip number.
 */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm __entry->irqchip, __entry->pin
#endif

/* Traced when an interrupt on the given irqchip pin is acknowledged. */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip = irqchip;
		__entry->pin = pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
201
202
203
/* MMIO access kinds recorded by the kvm_mmio event. */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

/* Symbol table for decoding kvm_mmio's "type" field. */
#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }
212
/*
 * Traced for guest MMIO accesses.  "val" may be NULL (e.g. for an
 * unsatisfied read), in which case 0 is recorded.  At most
 * sizeof(u64) bytes are captured, so accesses longer than 8 bytes are
 * truncated to their first 8 bytes.
 */
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
238
/* IOCSR access kinds recorded by the kvm_iocsr event. */
#define KVM_TRACE_IOCSR_READ_UNSATISFIED 0
#define KVM_TRACE_IOCSR_READ 1
#define KVM_TRACE_IOCSR_WRITE 2

/* Symbol table for decoding kvm_iocsr's "type" field. */
#define kvm_trace_symbol_iocsr \
	{ KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_IOCSR_READ, "read" }, \
	{ KVM_TRACE_IOCSR_WRITE, "write" }
247
/*
 * Traced for guest IOCSR accesses.  Mirrors kvm_mmio: "val" may be
 * NULL (recorded as 0) and at most sizeof(u64) bytes are captured.
 */
TRACE_EVENT(kvm_iocsr,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_iocsr),
		  __entry->len, __entry->gpa, __entry->val)
);
273
/* Symbol table for kvm_fpu's "load" field: 0 = unload, 1 = load. */
#define kvm_fpu_load_symbol \
	{0, "unload"}, \
	{1, "load"}

/* Traced when the guest FPU state is loaded or unloaded. */
TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
292
#ifdef CONFIG_KVM_ASYNC_PF
/*
 * Common shape for async-page-fault "get page" events: the faulting
 * guest virtual address and the corresponding guest frame number.
 */
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

/* Attempt to resolve a guest page asynchronously. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/* Same gva/gfn faulted again -- presumably while an async fault was
 * already outstanding; confirm at the call site. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);
326
/*
 * Common shape for the async-pf "not present"/"ready" notifications:
 * the token identifying the outstanding fault plus the guest virtual
 * address it covers.
 */
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

/* Guest told the page is not present (async fault queued). */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* Guest told the previously-faulted page is now ready. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);
360
/*
 * Traced when an async page fault completes: "address" is the host
 * virtual address that was resolved, "gva" the guest virtual address.
 */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif /* CONFIG_KVM_ASYNC_PF */
381
/*
 * Traced when a vcpu's halt_poll_ns value is adjusted: records the
 * direction ("grow"/"shrink"), the vcpu id, and the new and old
 * polling intervals.
 */
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
		  __entry->vcpu_id,
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

/* Convenience wrappers that fix the direction flag of the event above. */
#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
412
/*
 * Traced when a dirty page is pushed onto a dirty ring.  Snapshots the
 * ring indices plus the (memslot, offset) of the page; "used" in the
 * output is dirty_index - reset_index, i.e. entries not yet harvested.
 */
TRACE_EVENT(kvm_dirty_ring_push,
	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
	TP_ARGS(ring, slot, offset),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
		__field(u32, slot)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
		__entry->slot           = slot;
		__entry->offset         = offset;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x "
		  "slot %u offset 0x%llx (used %u)",
		  __entry->index, __entry->dirty_index,
		  __entry->reset_index, __entry->slot, __entry->offset,
		  __entry->dirty_index - __entry->reset_index)
);
439
/*
 * Traced when a dirty ring is reset; snapshots the ring indices.
 * "used" is dirty_index - reset_index at the time of the reset.
 */
TRACE_EVENT(kvm_dirty_ring_reset,
	TP_PROTO(struct kvm_dirty_ring *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
		  __entry->index, __entry->dirty_index, __entry->reset_index,
		  __entry->dirty_index - __entry->reset_index)
);
460
/* Traced when a vcpu exits to userspace because its dirty ring is full. */
TRACE_EVENT(kvm_dirty_ring_exit,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
	    __field(int, vcpu_id)
	),

	TP_fast_assign(
	    __entry->vcpu_id = vcpu->vcpu_id;
	),

	TP_printk("vcpu %d", __entry->vcpu_id)
);
475
/*
 * Traced when an MMU-notifier unmap is applied to the host virtual
 * address range [start, end).
 */
TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);
493
/*
 * Traced when the MMU notifier ages the host virtual address range
 * [start, end).
 */
TRACE_EVENT(kvm_age_hva,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);
511
/* Traced when the MMU notifier test-ages a single host virtual address. */
TRACE_EVENT(kvm_test_age_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
526
527#endif /* _TRACE_KVM_MAIN_H */
528
529/* This part must be outside protection */
530#include <trace/define_trace.h>
/*
 * NOTE(review): from here to the end of the file the content repeats
 * this same trace header in an apparently older revision (shorter
 * KVM_EXIT_* table, CONFIG_HAVE_KVM_IRQFD guards instead of
 * CONFIG_HAVE_KVM_IRQCHIP, kvm_mmio taking the value as u64, and an
 * extra kvm_age_page event).  With TRACE_HEADER_MULTI_READ defined the
 * guard below does not stop re-reading, so these definitions would
 * collide with the ones earlier in the file (e.g. kvm_deliver_mode is
 * redefined).  Confirm against the upstream file and keep only one
 * copy.
 */
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* Map a KVM_EXIT_* exit-reason code to its name for __print_symbolic(). */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/* Older, shorter variant of the kvm_trace_exit_reason table above. */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)
18
/* Older duplicate of kvm_userspace_exit defined earlier in this file. */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

/* Older duplicate of kvm_vcpu_wakeup defined earlier in this file. */
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
		__entry->valid		= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
61
/* Older duplicate of kvm_set_irq, guarded by the pre-rename IRQFD symbol. */
#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
83
/* Older duplicate of the __KVM_HAVE_IOAPIC section earlier in this
 * file (kvm_deliver_mode would be redefined if both copies are read). */
#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e		= e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */
169
/* Older duplicate of the kvm_ack_irq section, guarded by IRQFD. */
#if defined(CONFIG_HAVE_KVM_IRQFD)

#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm __entry->irqchip, __entry->pin
#endif

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
199
200
/* Older duplicates of the MMIO symbols, kvm_mmio (note: takes the
 * value directly as u64, not via pointer) and kvm_fpu. */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

#define kvm_fpu_load_symbol \
	{0, "unload"}, \
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
251
/*
 * Traced when a guest page is aged (only present in this older copy of
 * the header).  The host virtual address is reconstructed from the
 * memslot as ((gfn - base_gfn) << PAGE_SHIFT) + userspace_addr;
 * "referenced" reports whether the page was found young.
 */
TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	level		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->gfn		= gfn;
		__entry->level		= level;
		__entry->hva		= ((gfn - slot->base_gfn) <<
					    PAGE_SHIFT) + slot->userspace_addr;
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx level %u %s",
		  __entry->hva, __entry->gfn, __entry->level,
		  __entry->referenced ? "YOUNG" : "OLD")
);
275
/* Older duplicate of the CONFIG_KVM_ASYNC_PF section earlier in this
 * file; the "repeated fault" event here is named kvm_async_pf_doublefault. */
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif /* CONFIG_KVM_ASYNC_PF */
364
/* Older duplicate of kvm_halt_poll_ns and its wrapper macros. */
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
		  __entry->vcpu_id,
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
395
396#endif /* _TRACE_KVM_MAIN_H */
397
398/* This part must be outside protection */
399#include <trace/define_trace.h>