/*
 * include/trace/events/kvm.h — architecture-independent KVM tracepoints.
 * First copy below corresponds to kernel v6.2.
 */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NOTE: trace headers are deliberately included more than once by
 * <trace/define_trace.h>, hence the TRACE_HEADER_MULTI_READ escape in
 * the include guard below.
 */
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* Pair a KVM_EXIT_* constant with its printable name for __print_symbolic(). */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/* Symbol table of userspace exit reasons, used by kvm_userspace_exit below. */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),          \
	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
/*
 * kvm_userspace_exit - vcpu exit to userspace.
 * Prints the symbolic KVM_EXIT_* reason on success; when errno < 0 it
 * prints "restart" (-EINTR) or "error" instead, followed by -errno.
 */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);
 42
/*
 * kvm_vcpu_wakeup - vcpu wakeup after halt.
 * 'ns' is the elapsed time in nanoseconds; 'waited' selects the "wait"
 * vs "poll" label; 'valid' is printed as "polling valid"/"polling invalid"
 * (presumably whether the poll interval was considered valid — confirm at
 * the caller).
 */
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
		__entry->valid		= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
 64
#if defined(CONFIG_HAVE_KVM_IRQFD)
/*
 * kvm_set_irq - an IRQ line (GSI) is asserted/deasserted.
 * 'level' is the new line level, 'irq_source_id' identifies the source.
 */
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
 86
#if defined(__KVM_HAVE_IOAPIC)
/* 3-bit APIC delivery-mode field, decoded symbolically by the events below. */
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

/*
 * kvm_ioapic_set_irq - IOAPIC pin raised.
 * 'e' is the redirection-table entry, decoded in the printk as:
 * dest = bits 63:56, vector = bits 7:0, delivery mode = bits 10:8,
 * dest mode = bit 11 (logical/physical), trigger = bit 15 (level/edge),
 * mask = bit 16.  "(coalesced)" is appended when 'coalesced' is set.
 */
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

/*
 * kvm_ioapic_delayed_eoi_inj - delayed EOI injection; 'e' is a redirection
 * entry decoded exactly like kvm_ioapic_set_irq above.
 */
TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e		= e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

/*
 * kvm_msi_set_irq - MSI delivered.
 * Decoded in the printk: dst = address bits 19:12 merged with the extended
 * destination bits from address >> 32 (& 0xffffff00); vector = data bits
 * 7:0; delivery mode = data bits 10:8; dest mode = address bit 2;
 * trigger = data bit 15; "|rh" (redirection hint) = address bit 3.
 */
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

/* Symbolic names for the KVM_IRQCHIP_* identifiers (see kvm_ack_irq). */
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */
172
#if defined(CONFIG_HAVE_KVM_IRQFD)

/*
 * Print the irqchip symbolically when the kvm_irqchips table exists
 * (i.e. __KVM_HAVE_IOAPIC), otherwise fall back to the raw number.
 */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

/* kvm_ack_irq - interrupt acknowledged on the given irqchip/pin. */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
201
202
203
/* MMIO access kinds recorded by kvm_mmio. */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

/*
 * kvm_mmio - MMIO access at guest physical address 'gpa'.
 * At most sizeof(u64) bytes of 'val' are captured (val may be NULL,
 * e.g. for an unsatisfied read; the field then stays 0).
 */
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
238
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* kvm_fpu - guest FPU state loaded (1) or unloaded (0). */
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
257
#ifdef CONFIG_KVM_ASYNC_PF
/* Shared shape for async-page-fault events carrying a gva/gfn pair. */
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

/* Async page lookup attempted for the faulting gva/gfn. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/* The same gva/gfn faulted again while an async fault was outstanding. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/* Shared shape for async-page-fault events carrying a token/gva pair. */
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

/* "Page not present" async-PF notification injected into the guest. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* "Page ready" async-PF completion notification injected into the guest. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* Async page fault work completed for host address/guest gva. */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
		),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
		),

	TP_printk("gva %#llx address %#lx",  __entry->gva,
		  __entry->address)
);

#endif
346
/*
 * kvm_halt_poll_ns - adaptive halt-polling window changed for a vcpu.
 * 'grow' selects the "grow"/"shrink" label; 'new' and 'old' are the
 * window sizes in nanoseconds before/after the adjustment.
 */
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
			__entry->vcpu_id,
			__entry->new,
			__entry->grow ? "grow" : "shrink",
			__entry->old)
);

/* Convenience wrappers that fix the 'grow' direction. */
#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
377
/*
 * kvm_dirty_ring_push - a dirty page (slot, offset) was pushed onto a
 * vcpu's dirty ring; "used" is dirty_index - reset_index.
 */
TRACE_EVENT(kvm_dirty_ring_push,
	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
	TP_ARGS(ring, slot, offset),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
		__field(u32, slot)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
		__entry->slot           = slot;
		__entry->offset         = offset;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x "
		  "slot %u offset 0x%llx (used %u)",
		  __entry->index, __entry->dirty_index,
		  __entry->reset_index,  __entry->slot, __entry->offset,
		  __entry->dirty_index - __entry->reset_index)
);

/* kvm_dirty_ring_reset - snapshot of a dirty ring's indices after reset. */
TRACE_EVENT(kvm_dirty_ring_reset,
	TP_PROTO(struct kvm_dirty_ring *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
		  __entry->index, __entry->dirty_index, __entry->reset_index,
		  __entry->dirty_index - __entry->reset_index)
);

/* kvm_dirty_ring_exit - vcpu exits to userspace due to a full dirty ring. */
TRACE_EVENT(kvm_dirty_ring_exit,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
	    __field(int, vcpu_id)
	),

	TP_fast_assign(
	    __entry->vcpu_id = vcpu->vcpu_id;
	),

	TP_printk("vcpu %d", __entry->vcpu_id)
);
440
/* kvm_unmap_hva_range - mmu-notifier unmap of host VA range [start, end). */
TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);

/* kvm_set_spte_hva - mmu-notifier change_pte for the given host VA. */
TRACE_EVENT(kvm_set_spte_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);

/* kvm_age_hva - mmu-notifier age (clear accessed) of host VA range. */
TRACE_EVENT(kvm_age_hva,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);

/* kvm_test_age_hva - mmu-notifier test-young for the given host VA. */
TRACE_EVENT(kvm_test_age_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
/* Second copy of the same header, as of kernel v6.13.7. */
/* SPDX-License-Identifier: GPL-2.0 */
/* Multi-read guard: trace headers are re-included by <trace/define_trace.h>. */
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* Pair a KVM_EXIT_* constant with its printable name for __print_symbolic(). */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/* Symbol table of userspace exit reasons (kvm_userspace_exit). */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),          \
	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
/*
 * kvm_userspace_exit - vcpu exit to userspace; prints the symbolic
 * KVM_EXIT_* reason, or "restart"/"error" when errno < 0.
 */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

/* kvm_vcpu_wakeup - wait/poll duration (ns) and validity at vcpu wakeup. */
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
		__entry->valid		= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
 64
/* Guard changed from CONFIG_HAVE_KVM_IRQFD (<= v6.2) to CONFIG_HAVE_KVM_IRQCHIP. */
#if defined(CONFIG_HAVE_KVM_IRQCHIP)
/* kvm_set_irq - IRQ line (GSI) asserted/deasserted by a given source. */
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
 86
#if defined(__KVM_HAVE_IOAPIC)
/* 3-bit APIC delivery-mode field, decoded symbolically by the events below. */
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

/*
 * kvm_ioapic_set_irq - IOAPIC pin raised; 'e' is the redirection entry
 * (dest 63:56, vector 7:0, delivery 10:8, dest-mode bit 11, trigger
 * bit 15, mask bit 16).  "(coalesced)" appended when 'coalesced' is set.
 */
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

/* kvm_ioapic_delayed_eoi_inj - delayed EOI injection; entry decoded as above. */
TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e		= e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

/*
 * kvm_msi_set_irq - MSI delivered; dst merges address bits 19:12 with the
 * extended destination from address >> 32; vector/delivery/trigger come
 * from 'data', dest-mode/rh from 'address'.
 */
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

/* Symbolic names for the KVM_IRQCHIP_* identifiers (see kvm_ack_irq). */
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */
172
#if defined(CONFIG_HAVE_KVM_IRQCHIP)

/* Symbolic irqchip name when kvm_irqchips exists, raw number otherwise. */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

/* kvm_ack_irq - interrupt acknowledged on the given irqchip/pin. */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
201
202
203
/* MMIO access kinds recorded by kvm_mmio. */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

/*
 * kvm_mmio - MMIO access at guest physical address 'gpa'; captures up to
 * sizeof(u64) bytes of 'val' (NULL val leaves the field 0).
 */
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
238
/* IOCSR access kinds recorded by kvm_iocsr (new relative to v6.2). */
#define KVM_TRACE_IOCSR_READ_UNSATISFIED 0
#define KVM_TRACE_IOCSR_READ 1
#define KVM_TRACE_IOCSR_WRITE 2

#define kvm_trace_symbol_iocsr \
	{ KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_IOCSR_READ, "read" }, \
	{ KVM_TRACE_IOCSR_WRITE, "write" }

/*
 * kvm_iocsr - IOCSR access; mirrors kvm_mmio's shape exactly.
 * NOTE(review): IOCSR appears to be an architecture-specific I/O
 * control/status register space (likely LoongArch) — confirm at callers.
 */
TRACE_EVENT(kvm_iocsr,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type	)
		__field(	u32,	len	)
		__field(	u64,	gpa	)
		__field(	u64,	val	)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_iocsr),
		  __entry->len, __entry->gpa, __entry->val)
);
273
/* kvm_fpu - guest FPU state loaded (1) or unloaded (0). */
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
292
#ifdef CONFIG_KVM_ASYNC_PF
/* Shared shape for async-page-fault events carrying a gva/gfn pair. */
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

/* Async page lookup attempted for the faulting gva/gfn. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/* The same gva/gfn faulted again while an async fault was outstanding. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/* Shared shape for async-page-fault events carrying a token/gva pair. */
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

/* "Page not present" async-PF notification injected into the guest. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* "Page ready" async-PF completion notification injected into the guest. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* Async page fault work completed for host address/guest gva. */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
		),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
		),

	TP_printk("gva %#llx address %#lx",  __entry->gva,
		  __entry->address)
);

#endif
381
/* kvm_halt_poll_ns - adaptive halt-polling window grew/shrank (old -> new ns). */
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
			__entry->vcpu_id,
			__entry->new,
			__entry->grow ? "grow" : "shrink",
			__entry->old)
);

/* Convenience wrappers that fix the 'grow' direction. */
#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
412
/* kvm_dirty_ring_push - dirty page (slot, offset) pushed onto a dirty ring. */
TRACE_EVENT(kvm_dirty_ring_push,
	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
	TP_ARGS(ring, slot, offset),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
		__field(u32, slot)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
		__entry->slot           = slot;
		__entry->offset         = offset;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x "
		  "slot %u offset 0x%llx (used %u)",
		  __entry->index, __entry->dirty_index,
		  __entry->reset_index,  __entry->slot, __entry->offset,
		  __entry->dirty_index - __entry->reset_index)
);

/* kvm_dirty_ring_reset - snapshot of a dirty ring's indices after reset. */
TRACE_EVENT(kvm_dirty_ring_reset,
	TP_PROTO(struct kvm_dirty_ring *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
		  __entry->index, __entry->dirty_index, __entry->reset_index,
		  __entry->dirty_index - __entry->reset_index)
);

/* kvm_dirty_ring_exit - vcpu exits to userspace due to a full dirty ring. */
TRACE_EVENT(kvm_dirty_ring_exit,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
	    __field(int, vcpu_id)
	),

	TP_fast_assign(
	    __entry->vcpu_id = vcpu->vcpu_id;
	),

	TP_printk("vcpu %d", __entry->vcpu_id)
);
475
/*
 * kvm_unmap_hva_range - mmu-notifier unmap of host VA range [start, end).
 * (kvm_set_spte_hva from the v6.2 copy no longer exists in this version.)
 */
TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);

/* kvm_age_hva - mmu-notifier age (clear accessed) of host VA range. */
TRACE_EVENT(kvm_age_hva,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);

/* kvm_test_age_hva - mmu-notifier test-young for the given host VA. */
TRACE_EVENT(kvm_test_age_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>