arch/x86/kvm/trace.h (v3.15)

  1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  2#define _TRACE_KVM_H
  3
  4#include <linux/tracepoint.h>
  5#include <asm/vmx.h>
  6#include <asm/svm.h>
  7#include <asm/clocksource.h>
  8
  9#undef TRACE_SYSTEM
 10#define TRACE_SYSTEM kvm
 11
 12/*
 13 * Tracepoint for guest mode entry.
 14 */
 15TRACE_EVENT(kvm_entry,
 16	TP_PROTO(unsigned int vcpu_id),
 17	TP_ARGS(vcpu_id),
 18
 19	TP_STRUCT__entry(
 20		__field(	unsigned int,	vcpu_id		)
 21	),
 22
 23	TP_fast_assign(
 24		__entry->vcpu_id	= vcpu_id;
 25	),
 26
 27	TP_printk("vcpu %u", __entry->vcpu_id)
 28);
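/*
 * Editor's note -- illustrative sketch, not part of the original header.
 * TRACE_EVENT(kvm_entry, ...) above generates a static inline
 * trace_kvm_entry() with the TP_PROTO signature.  A caller on the
 * guest-entry path (vcpu_enter_guest() in arch/x86/kvm/x86.c in this era)
 * fires it just before the hardware VM entry:
 *
 *	trace_kvm_entry(vcpu->vcpu_id);
 *
 * The resulting event is exposed under
 * /sys/kernel/debug/tracing/events/kvm/kvm_entry.
 */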
 29
 30/*
 31 * Tracepoint for hypercall.
 32 */
 33TRACE_EVENT(kvm_hypercall,
 34	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 35		 unsigned long a2, unsigned long a3),
 36	TP_ARGS(nr, a0, a1, a2, a3),
 37
 38	TP_STRUCT__entry(
 39		__field(	unsigned long, 	nr		)
 40		__field(	unsigned long,	a0		)
 41		__field(	unsigned long,	a1		)
 42		__field(	unsigned long,	a2		)
 43		__field(	unsigned long,	a3		)
 44	),
 45
 46	TP_fast_assign(
 47		__entry->nr		= nr;
 48		__entry->a0		= a0;
 49		__entry->a1		= a1;
 50		__entry->a2		= a2;
 51		__entry->a3		= a3;
 52	),
 53
 54	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
 55		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 56		 __entry->a3)
 57);
 58
 59/*
  60 * Tracepoint for Hyper-V hypercall.
 61 */
 62TRACE_EVENT(kvm_hv_hypercall,
 63	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
 64		 __u64 ingpa, __u64 outgpa),
 65	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 66
 67	TP_STRUCT__entry(
 68		__field(	__u16,		rep_cnt		)
 69		__field(	__u16,		rep_idx		)
 70		__field(	__u64,		ingpa		)
 71		__field(	__u64,		outgpa		)
 72		__field(	__u16, 		code		)
 73		__field(	bool,		fast		)
 74	),
 75
 76	TP_fast_assign(
 77		__entry->rep_cnt	= rep_cnt;
 78		__entry->rep_idx	= rep_idx;
 79		__entry->ingpa		= ingpa;
 80		__entry->outgpa		= outgpa;
 81		__entry->code		= code;
 82		__entry->fast		= fast;
 83	),
 84
 85	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
 86		  __entry->code, __entry->fast ? "fast" : "slow",
 87		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
 88		  __entry->outgpa)
 89);
 90
 91/*
 92 * Tracepoint for PIO.
 93 */
 94TRACE_EVENT(kvm_pio,
 95	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 96		 unsigned int count),
 97	TP_ARGS(rw, port, size, count),
 98
 99	TP_STRUCT__entry(
100		__field(	unsigned int, 	rw		)
101		__field(	unsigned int, 	port		)
102		__field(	unsigned int, 	size		)
103		__field(	unsigned int,	count		)
104	),
105
106	TP_fast_assign(
107		__entry->rw		= rw;
108		__entry->port		= port;
109		__entry->size		= size;
110		__entry->count		= count;
111	),
112
113	TP_printk("pio_%s at 0x%x size %d count %d",
114		  __entry->rw ? "write" : "read",
115		  __entry->port, __entry->size, __entry->count)
116);
117
118/*
119 * Tracepoint for cpuid.
120 */
121TRACE_EVENT(kvm_cpuid,
122	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
123		 unsigned long rcx, unsigned long rdx),
124	TP_ARGS(function, rax, rbx, rcx, rdx),
125
126	TP_STRUCT__entry(
127		__field(	unsigned int,	function	)
128		__field(	unsigned long,	rax		)
129		__field(	unsigned long,	rbx		)
130		__field(	unsigned long,	rcx		)
131		__field(	unsigned long,	rdx		)
132	),
133
134	TP_fast_assign(
135		__entry->function	= function;
136		__entry->rax		= rax;
137		__entry->rbx		= rbx;
138		__entry->rcx		= rcx;
139		__entry->rdx		= rdx;
140	),
141
142	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
143		  __entry->function, __entry->rax,
144		  __entry->rbx, __entry->rcx, __entry->rdx)
145);
146
147#define AREG(x) { APIC_##x, "APIC_" #x }
148
149#define kvm_trace_symbol_apic						    \
150	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
151	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
152	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
153	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
154	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
155	AREG(ECTRL)
156/*
157 * Tracepoint for apic access.
158 */
159TRACE_EVENT(kvm_apic,
160	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
161	TP_ARGS(rw, reg, val),
162
163	TP_STRUCT__entry(
164		__field(	unsigned int,	rw		)
165		__field(	unsigned int,	reg		)
166		__field(	unsigned int,	val		)
167	),
168
169	TP_fast_assign(
170		__entry->rw		= rw;
171		__entry->reg		= reg;
172		__entry->val		= val;
173	),
174
175	TP_printk("apic_%s %s = 0x%x",
176		  __entry->rw ? "write" : "read",
177		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
178		  __entry->val)
179);
180
181#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
182#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
183
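/*
 * Editor's note -- illustrative only, not in the original file.  The two
 * wrappers above give the local APIC emulation code (arch/x86/kvm/lapic.c)
 * read/write-flavoured entry points to the same kvm_apic event, e.g.:
 *
 *	trace_kvm_apic_write(APIC_ICR, val);
 *	trace_kvm_apic_read(APIC_SPIV, result);
 *
 * __print_symbolic() then renders the register offset through
 * kvm_trace_symbol_apic, so the trace line reads "apic_write APIC_ICR = 0x...".
 */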
184#define KVM_ISA_VMX   1
185#define KVM_ISA_SVM   2
186
187/*
188 * Tracepoint for kvm guest exit:
189 */
190TRACE_EVENT(kvm_exit,
191	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
192	TP_ARGS(exit_reason, vcpu, isa),
193
194	TP_STRUCT__entry(
195		__field(	unsigned int,	exit_reason	)
196		__field(	unsigned long,	guest_rip	)
197		__field(	u32,	        isa             )
198		__field(	u64,	        info1           )
199		__field(	u64,	        info2           )
200	),
201
202	TP_fast_assign(
203		__entry->exit_reason	= exit_reason;
204		__entry->guest_rip	= kvm_rip_read(vcpu);
205		__entry->isa            = isa;
206		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
207					   &__entry->info2);
208	),
209
210	TP_printk("reason %s rip 0x%lx info %llx %llx",
211		 (__entry->isa == KVM_ISA_VMX) ?
212		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
213		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
214		 __entry->guest_rip, __entry->info1, __entry->info2)
215);
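/*
 * Editor's note -- illustrative sketch, not part of the original header.
 * Each vendor module passes its own ISA tag so TP_printk can pick the
 * matching exit-reason table; the VMX exit handler does, roughly:
 *
 *	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 *
 * while svm.c passes KVM_ISA_SVM.  kvm_x86_ops->get_exit_info() fills
 * info1/info2 (exit qualification / interrupt info on VMX,
 * exit_info_1/exit_info_2 on SVM).
 */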
216
217/*
218 * Tracepoint for kvm interrupt injection:
219 */
220TRACE_EVENT(kvm_inj_virq,
221	TP_PROTO(unsigned int irq),
222	TP_ARGS(irq),
223
224	TP_STRUCT__entry(
225		__field(	unsigned int,	irq		)
226	),
227
228	TP_fast_assign(
229		__entry->irq		= irq;
230	),
231
232	TP_printk("irq %u", __entry->irq)
233);
234
235#define EXS(x) { x##_VECTOR, "#" #x }
236
237#define kvm_trace_sym_exc						\
238	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
239	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
240	EXS(MF), EXS(MC)
241
242/*
243 * Tracepoint for kvm interrupt injection:
244 */
245TRACE_EVENT(kvm_inj_exception,
246	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
247	TP_ARGS(exception, has_error, error_code),
248
249	TP_STRUCT__entry(
250		__field(	u8,	exception	)
251		__field(	u8,	has_error	)
252		__field(	u32,	error_code	)
253	),
254
255	TP_fast_assign(
256		__entry->exception	= exception;
257		__entry->has_error	= has_error;
258		__entry->error_code	= error_code;
259	),
260
261	TP_printk("%s (0x%x)",
262		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
263		  /* FIXME: don't print error_code if not present */
264		  __entry->has_error ? __entry->error_code : 0)
265);
266
267/*
268 * Tracepoint for page fault.
269 */
270TRACE_EVENT(kvm_page_fault,
271	TP_PROTO(unsigned long fault_address, unsigned int error_code),
272	TP_ARGS(fault_address, error_code),
273
274	TP_STRUCT__entry(
275		__field(	unsigned long,	fault_address	)
276		__field(	unsigned int,	error_code	)
277	),
278
279	TP_fast_assign(
280		__entry->fault_address	= fault_address;
281		__entry->error_code	= error_code;
282	),
283
284	TP_printk("address %lx error_code %x",
285		  __entry->fault_address, __entry->error_code)
286);
287
288/*
289 * Tracepoint for guest MSR access.
290 */
291TRACE_EVENT(kvm_msr,
292	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
293	TP_ARGS(write, ecx, data, exception),
294
295	TP_STRUCT__entry(
296		__field(	unsigned,	write		)
297		__field(	u32,		ecx		)
298		__field(	u64,		data		)
299		__field(	u8,		exception	)
300	),
301
302	TP_fast_assign(
303		__entry->write		= write;
304		__entry->ecx		= ecx;
305		__entry->data		= data;
306		__entry->exception	= exception;
307	),
308
309	TP_printk("msr_%s %x = 0x%llx%s",
310		  __entry->write ? "write" : "read",
311		  __entry->ecx, __entry->data,
312		  __entry->exception ? " (#GP)" : "")
313);
314
315#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
316#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
317#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
318#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
319
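/*
 * Editor's note -- illustrative only.  The wrappers above encode the
 * access direction and whether the access faulted, so the MSR emulation
 * paths in x86.c can log both outcomes of a guest RDMSR/WRMSR, e.g.:
 *
 *	trace_kvm_msr_write(ecx, data);    -- successful write
 *	trace_kvm_msr_read_ex(ecx);        -- read that raised #GP,
 *	                                      printed with a " (#GP)" suffix
 */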
320/*
321 * Tracepoint for guest CR access.
322 */
323TRACE_EVENT(kvm_cr,
324	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
325	TP_ARGS(rw, cr, val),
326
327	TP_STRUCT__entry(
328		__field(	unsigned int,	rw		)
329		__field(	unsigned int,	cr		)
330		__field(	unsigned long,	val		)
331	),
332
333	TP_fast_assign(
334		__entry->rw		= rw;
335		__entry->cr		= cr;
336		__entry->val		= val;
337	),
338
339	TP_printk("cr_%s %x = 0x%lx",
340		  __entry->rw ? "write" : "read",
341		  __entry->cr, __entry->val)
342);
343
344#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
345#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
346
347TRACE_EVENT(kvm_pic_set_irq,
348	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
349	    TP_ARGS(chip, pin, elcr, imr, coalesced),
350
351	TP_STRUCT__entry(
352		__field(	__u8,		chip		)
353		__field(	__u8,		pin		)
354		__field(	__u8,		elcr		)
355		__field(	__u8,		imr		)
356		__field(	bool,		coalesced	)
357	),
358
359	TP_fast_assign(
360		__entry->chip		= chip;
361		__entry->pin		= pin;
362		__entry->elcr		= elcr;
363		__entry->imr		= imr;
364		__entry->coalesced	= coalesced;
365	),
366
367	TP_printk("chip %u pin %u (%s%s)%s",
368		  __entry->chip, __entry->pin,
369		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
370		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
371		  __entry->coalesced ? " (coalesced)" : "")
372);
373
374#define kvm_apic_dst_shorthand		\
375	{0x0, "dst"},			\
376	{0x1, "self"},			\
377	{0x2, "all"},			\
378	{0x3, "all-but-self"}
379
380TRACE_EVENT(kvm_apic_ipi,
381	    TP_PROTO(__u32 icr_low, __u32 dest_id),
382	    TP_ARGS(icr_low, dest_id),
383
384	TP_STRUCT__entry(
385		__field(	__u32,		icr_low		)
386		__field(	__u32,		dest_id		)
387	),
388
389	TP_fast_assign(
390		__entry->icr_low	= icr_low;
391		__entry->dest_id	= dest_id;
392	),
393
394	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
395		  __entry->dest_id, (u8)__entry->icr_low,
396		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
397				   kvm_deliver_mode),
398		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
399		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
400		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
401		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
402				   kvm_apic_dst_shorthand))
403);
404
405TRACE_EVENT(kvm_apic_accept_irq,
406	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
407	    TP_ARGS(apicid, dm, tm, vec, coalesced),
408
409	TP_STRUCT__entry(
410		__field(	__u32,		apicid		)
411		__field(	__u16,		dm		)
412		__field(	__u8,		tm		)
413		__field(	__u8,		vec		)
414		__field(	bool,		coalesced	)
415	),
416
417	TP_fast_assign(
418		__entry->apicid		= apicid;
419		__entry->dm		= dm;
420		__entry->tm		= tm;
421		__entry->vec		= vec;
422		__entry->coalesced	= coalesced;
423	),
424
425	TP_printk("apicid %x vec %u (%s|%s)%s",
426		  __entry->apicid, __entry->vec,
427		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
428		  __entry->tm ? "level" : "edge",
429		  __entry->coalesced ? " (coalesced)" : "")
430);
431
432TRACE_EVENT(kvm_eoi,
433	    TP_PROTO(struct kvm_lapic *apic, int vector),
434	    TP_ARGS(apic, vector),
435
436	TP_STRUCT__entry(
437		__field(	__u32,		apicid		)
438		__field(	int,		vector		)
439	),
440
441	TP_fast_assign(
442		__entry->apicid		= apic->vcpu->vcpu_id;
443		__entry->vector		= vector;
444	),
445
446	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
447);
448
449TRACE_EVENT(kvm_pv_eoi,
450	    TP_PROTO(struct kvm_lapic *apic, int vector),
451	    TP_ARGS(apic, vector),
452
453	TP_STRUCT__entry(
454		__field(	__u32,		apicid		)
455		__field(	int,		vector		)
456	),
457
458	TP_fast_assign(
459		__entry->apicid		= apic->vcpu->vcpu_id;
460		__entry->vector		= vector;
461	),
462
463	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
464);
465
466/*
467 * Tracepoint for nested VMRUN
468 */
469TRACE_EVENT(kvm_nested_vmrun,
470	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
471		     __u32 event_inj, bool npt),
472	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
473
474	TP_STRUCT__entry(
475		__field(	__u64,		rip		)
476		__field(	__u64,		vmcb		)
477		__field(	__u64,		nested_rip	)
478		__field(	__u32,		int_ctl		)
479		__field(	__u32,		event_inj	)
480		__field(	bool,		npt		)
481	),
482
483	TP_fast_assign(
484		__entry->rip		= rip;
485		__entry->vmcb		= vmcb;
486		__entry->nested_rip	= nested_rip;
487		__entry->int_ctl	= int_ctl;
488		__entry->event_inj	= event_inj;
489		__entry->npt		= npt;
490	),
491
492	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
493		  "event_inj: 0x%08x npt: %s",
494		__entry->rip, __entry->vmcb, __entry->nested_rip,
495		__entry->int_ctl, __entry->event_inj,
496		__entry->npt ? "on" : "off")
497);
498
499TRACE_EVENT(kvm_nested_intercepts,
500	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
501	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
502
503	TP_STRUCT__entry(
504		__field(	__u16,		cr_read		)
505		__field(	__u16,		cr_write	)
506		__field(	__u32,		exceptions	)
507		__field(	__u64,		intercept	)
508	),
509
510	TP_fast_assign(
511		__entry->cr_read	= cr_read;
512		__entry->cr_write	= cr_write;
513		__entry->exceptions	= exceptions;
514		__entry->intercept	= intercept;
515	),
516
517	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
518		__entry->cr_read, __entry->cr_write, __entry->exceptions,
519		__entry->intercept)
520);
521/*
522 * Tracepoint for #VMEXIT while nested
523 */
524TRACE_EVENT(kvm_nested_vmexit,
525	    TP_PROTO(__u64 rip, __u32 exit_code,
526		     __u64 exit_info1, __u64 exit_info2,
527		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
528	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
529		    exit_int_info, exit_int_info_err, isa),
530
531	TP_STRUCT__entry(
532		__field(	__u64,		rip			)
533		__field(	__u32,		exit_code		)
534		__field(	__u64,		exit_info1		)
535		__field(	__u64,		exit_info2		)
536		__field(	__u32,		exit_int_info		)
537		__field(	__u32,		exit_int_info_err	)
538		__field(	__u32,		isa			)
539	),
540
541	TP_fast_assign(
542		__entry->rip			= rip;
543		__entry->exit_code		= exit_code;
544		__entry->exit_info1		= exit_info1;
545		__entry->exit_info2		= exit_info2;
546		__entry->exit_int_info		= exit_int_info;
547		__entry->exit_int_info_err	= exit_int_info_err;
548		__entry->isa			= isa;
549	),
550	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
551		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
552		  __entry->rip,
553		 (__entry->isa == KVM_ISA_VMX) ?
554		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
555		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
556		  __entry->exit_info1, __entry->exit_info2,
557		  __entry->exit_int_info, __entry->exit_int_info_err)
558);
559
560/*
561 * Tracepoint for #VMEXIT reinjected to the guest
562 */
563TRACE_EVENT(kvm_nested_vmexit_inject,
564	    TP_PROTO(__u32 exit_code,
565		     __u64 exit_info1, __u64 exit_info2,
566		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
567	    TP_ARGS(exit_code, exit_info1, exit_info2,
568		    exit_int_info, exit_int_info_err, isa),
569
570	TP_STRUCT__entry(
571		__field(	__u32,		exit_code		)
572		__field(	__u64,		exit_info1		)
573		__field(	__u64,		exit_info2		)
574		__field(	__u32,		exit_int_info		)
575		__field(	__u32,		exit_int_info_err	)
576		__field(	__u32,		isa			)
577	),
578
579	TP_fast_assign(
580		__entry->exit_code		= exit_code;
581		__entry->exit_info1		= exit_info1;
582		__entry->exit_info2		= exit_info2;
583		__entry->exit_int_info		= exit_int_info;
584		__entry->exit_int_info_err	= exit_int_info_err;
585		__entry->isa			= isa;
586	),
587
588	TP_printk("reason: %s ext_inf1: 0x%016llx "
589		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
590		 (__entry->isa == KVM_ISA_VMX) ?
591		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
592		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
593		__entry->exit_info1, __entry->exit_info2,
594		__entry->exit_int_info, __entry->exit_int_info_err)
595);
596
597/*
598 * Tracepoint for nested #vmexit because of interrupt pending
599 */
600TRACE_EVENT(kvm_nested_intr_vmexit,
601	    TP_PROTO(__u64 rip),
602	    TP_ARGS(rip),
603
604	TP_STRUCT__entry(
605		__field(	__u64,	rip	)
606	),
607
608	TP_fast_assign(
609		__entry->rip	=	rip
610	),
611
612	TP_printk("rip: 0x%016llx", __entry->rip)
613);
614
615/*
 616 * Tracepoint for INVLPGA.
617 */
618TRACE_EVENT(kvm_invlpga,
619	    TP_PROTO(__u64 rip, int asid, u64 address),
620	    TP_ARGS(rip, asid, address),
621
622	TP_STRUCT__entry(
623		__field(	__u64,	rip	)
624		__field(	int,	asid	)
625		__field(	__u64,	address	)
626	),
627
628	TP_fast_assign(
629		__entry->rip		=	rip;
630		__entry->asid		=	asid;
631		__entry->address	=	address;
632	),
633
634	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
635		  __entry->rip, __entry->asid, __entry->address)
636);
637
638/*
 639 * Tracepoint for SKINIT.
640 */
641TRACE_EVENT(kvm_skinit,
642	    TP_PROTO(__u64 rip, __u32 slb),
643	    TP_ARGS(rip, slb),
644
645	TP_STRUCT__entry(
646		__field(	__u64,	rip	)
647		__field(	__u32,	slb	)
648	),
649
650	TP_fast_assign(
651		__entry->rip		=	rip;
652		__entry->slb		=	slb;
653	),
654
655	TP_printk("rip: 0x%016llx slb: 0x%08x",
656		  __entry->rip, __entry->slb)
657);
658
659#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
660#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
661#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
662#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
663
664#define kvm_trace_symbol_emul_flags	                  \
665	{ 0,   			    "real" },		  \
666	{ KVM_EMUL_INSN_F_CR0_PE			  \
667	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
668	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
669	{ KVM_EMUL_INSN_F_CR0_PE			  \
670	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
671	{ KVM_EMUL_INSN_F_CR0_PE			  \
672	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
673
674#define kei_decode_mode(mode) ({			\
675	u8 flags = 0xff;				\
676	switch (mode) {					\
677	case X86EMUL_MODE_REAL:				\
678		flags = 0;				\
679		break;					\
680	case X86EMUL_MODE_VM86:				\
681		flags = KVM_EMUL_INSN_F_EFL_VM;		\
682		break;					\
683	case X86EMUL_MODE_PROT16:			\
684		flags = KVM_EMUL_INSN_F_CR0_PE;		\
685		break;					\
686	case X86EMUL_MODE_PROT32:			\
687		flags = KVM_EMUL_INSN_F_CR0_PE		\
688			| KVM_EMUL_INSN_F_CS_D;		\
689		break;					\
690	case X86EMUL_MODE_PROT64:			\
691		flags = KVM_EMUL_INSN_F_CR0_PE		\
692			| KVM_EMUL_INSN_F_CS_L;		\
693		break;					\
694	}						\
695	flags;						\
696	})
697
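/*
 * Editor's note -- worked example, not part of the original header.
 * kei_decode_mode() collapses the emulator mode into the flag bits that
 * kvm_trace_symbol_emul_flags maps back to a name, e.g.:
 *
 *	kei_decode_mode(X86EMUL_MODE_PROT64)
 *		== KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L
 *
 * which __print_symbolic() renders as "prot64"; an unrecognised mode
 * leaves flags at 0xff, which matches no entry in the table.
 */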
698TRACE_EVENT(kvm_emulate_insn,
699	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
700	TP_ARGS(vcpu, failed),
701
702	TP_STRUCT__entry(
703		__field(    __u64, rip                       )
704		__field(    __u32, csbase                    )
705		__field(    __u8,  len                       )
706		__array(    __u8,  insn,    15	             )
707		__field(    __u8,  flags       	   	     )
708		__field(    __u8,  failed                    )
709		),
710
711	TP_fast_assign(
712		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
713		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
714		__entry->len = vcpu->arch.emulate_ctxt._eip
715			       - vcpu->arch.emulate_ctxt.fetch.start;
716		memcpy(__entry->insn,
717		       vcpu->arch.emulate_ctxt.fetch.data,
718		       15);
719		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
720		__entry->failed = failed;
721		),
722
723	TP_printk("%x:%llx:%s (%s)%s",
724		  __entry->csbase, __entry->rip,
725		  __print_hex(__entry->insn, __entry->len),
726		  __print_symbolic(__entry->flags,
727				   kvm_trace_symbol_emul_flags),
728		  __entry->failed ? " failed" : ""
729		)
730	);
731
732#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
733#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
734
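/*
 * Editor's note -- illustrative only.  The emulator core in
 * arch/x86/kvm/x86.c brackets instruction emulation with these wrappers:
 * trace_kvm_emulate_insn_start(vcpu) when decode begins and
 * trace_kvm_emulate_insn_failed(vcpu) when emulation gives up, so a
 * failed decode shows the fetched bytes with a trailing " failed".
 */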
735TRACE_EVENT(
736	vcpu_match_mmio,
737	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
738	TP_ARGS(gva, gpa, write, gpa_match),
739
740	TP_STRUCT__entry(
741		__field(gva_t, gva)
742		__field(gpa_t, gpa)
743		__field(bool, write)
744		__field(bool, gpa_match)
745		),
746
747	TP_fast_assign(
748		__entry->gva = gva;
749		__entry->gpa = gpa;
750		__entry->write = write;
751		__entry->gpa_match = gpa_match
752		),
753
754	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
755		  __entry->write ? "Write" : "Read",
756		  __entry->gpa_match ? "GPA" : "GVA")
757);
758
759TRACE_EVENT(kvm_write_tsc_offset,
760	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
761		 __u64 next_tsc_offset),
762	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
763
764	TP_STRUCT__entry(
765		__field( unsigned int,	vcpu_id				)
766		__field(	__u64,	previous_tsc_offset		)
767		__field(	__u64,	next_tsc_offset			)
768	),
769
770	TP_fast_assign(
771		__entry->vcpu_id		= vcpu_id;
772		__entry->previous_tsc_offset	= previous_tsc_offset;
773		__entry->next_tsc_offset	= next_tsc_offset;
774	),
775
776	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
777		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
778);
779
780#ifdef CONFIG_X86_64
781
782#define host_clocks					\
783	{VCLOCK_NONE, "none"},				\
784	{VCLOCK_TSC,  "tsc"},				\
785	{VCLOCK_HPET, "hpet"}				\
786
787TRACE_EVENT(kvm_update_master_clock,
788	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
789	TP_ARGS(use_master_clock, host_clock, offset_matched),
790
791	TP_STRUCT__entry(
792		__field(		bool,	use_master_clock	)
793		__field(	unsigned int,	host_clock		)
794		__field(		bool,	offset_matched		)
795	),
796
797	TP_fast_assign(
798		__entry->use_master_clock	= use_master_clock;
799		__entry->host_clock		= host_clock;
800		__entry->offset_matched		= offset_matched;
801	),
802
803	TP_printk("masterclock %d hostclock %s offsetmatched %u",
804		  __entry->use_master_clock,
805		  __print_symbolic(__entry->host_clock, host_clocks),
806		  __entry->offset_matched)
807);
808
809TRACE_EVENT(kvm_track_tsc,
810	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
811		 unsigned int online_vcpus, bool use_master_clock,
812		 unsigned int host_clock),
813	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
814		host_clock),
815
816	TP_STRUCT__entry(
817		__field(	unsigned int,	vcpu_id			)
818		__field(	unsigned int,	nr_vcpus_matched_tsc	)
819		__field(	unsigned int,	online_vcpus		)
820		__field(	bool,		use_master_clock	)
821		__field(	unsigned int,	host_clock		)
822	),
823
824	TP_fast_assign(
825		__entry->vcpu_id		= vcpu_id;
826		__entry->nr_vcpus_matched_tsc	= nr_matched;
827		__entry->online_vcpus		= online_vcpus;
828		__entry->use_master_clock	= use_master_clock;
829		__entry->host_clock		= host_clock;
830	),
831
832	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
833		  " hostclock %s",
834		  __entry->vcpu_id, __entry->use_master_clock,
835		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
836		  __print_symbolic(__entry->host_clock, host_clocks))
837);
838
839#endif /* CONFIG_X86_64 */
840
841#endif /* _TRACE_KVM_H */
842
843#undef TRACE_INCLUDE_PATH
844#define TRACE_INCLUDE_PATH arch/x86/kvm
845#undef TRACE_INCLUDE_FILE
846#define TRACE_INCLUDE_FILE trace
847
848/* This part must be outside protection */
849#include <trace/define_trace.h>

arch/x86/kvm/trace.h (v5.14.15)

   1/* SPDX-License-Identifier: GPL-2.0 */
   2#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
   3#define _TRACE_KVM_H
   4
   5#include <linux/tracepoint.h>
   6#include <asm/vmx.h>
   7#include <asm/svm.h>
   8#include <asm/clocksource.h>
   9#include <asm/pvclock-abi.h>
  10
  11#undef TRACE_SYSTEM
  12#define TRACE_SYSTEM kvm
  13
  14/*
  15 * Tracepoint for guest mode entry.
  16 */
  17TRACE_EVENT(kvm_entry,
  18	TP_PROTO(struct kvm_vcpu *vcpu),
  19	TP_ARGS(vcpu),
  20
  21	TP_STRUCT__entry(
  22		__field(	unsigned int,	vcpu_id		)
  23		__field(	unsigned long,	rip		)
  24	),
  25
  26	TP_fast_assign(
  27		__entry->vcpu_id        = vcpu->vcpu_id;
  28		__entry->rip		= kvm_rip_read(vcpu);
  29	),
  30
  31	TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
  32);
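/*
 * Editor's note -- illustrative sketch, not in the original file.  Unlike
 * the v3.15 version above, this variant takes the vcpu itself, so callers
 * simply do:
 *
 *	trace_kvm_entry(vcpu);
 *
 * and TP_fast_assign pulls vcpu_id and the current RIP (via
 * kvm_rip_read()) out of the vcpu.
 */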
  33
  34/*
  35 * Tracepoint for hypercall.
  36 */
  37TRACE_EVENT(kvm_hypercall,
  38	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
  39		 unsigned long a2, unsigned long a3),
  40	TP_ARGS(nr, a0, a1, a2, a3),
  41
  42	TP_STRUCT__entry(
  43		__field(	unsigned long, 	nr		)
  44		__field(	unsigned long,	a0		)
  45		__field(	unsigned long,	a1		)
  46		__field(	unsigned long,	a2		)
  47		__field(	unsigned long,	a3		)
  48	),
  49
  50	TP_fast_assign(
  51		__entry->nr		= nr;
  52		__entry->a0		= a0;
  53		__entry->a1		= a1;
  54		__entry->a2		= a2;
  55		__entry->a3		= a3;
  56	),
  57
  58	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
  59		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
  60		 __entry->a3)
  61);
  62
  63/*
   64 * Tracepoint for Hyper-V hypercall.
  65 */
  66TRACE_EVENT(kvm_hv_hypercall,
  67	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
  68		 __u64 ingpa, __u64 outgpa),
  69	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
  70
  71	TP_STRUCT__entry(
  72		__field(	__u16,		rep_cnt		)
  73		__field(	__u16,		rep_idx		)
  74		__field(	__u64,		ingpa		)
  75		__field(	__u64,		outgpa		)
  76		__field(	__u16, 		code		)
  77		__field(	bool,		fast		)
  78	),
  79
  80	TP_fast_assign(
  81		__entry->rep_cnt	= rep_cnt;
  82		__entry->rep_idx	= rep_idx;
  83		__entry->ingpa		= ingpa;
  84		__entry->outgpa		= outgpa;
  85		__entry->code		= code;
  86		__entry->fast		= fast;
  87	),
  88
  89	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
  90		  __entry->code, __entry->fast ? "fast" : "slow",
  91		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
  92		  __entry->outgpa)
  93);
  94
  95TRACE_EVENT(kvm_hv_hypercall_done,
  96	TP_PROTO(u64 result),
  97	TP_ARGS(result),
  98
  99	TP_STRUCT__entry(
 100		__field(__u64, result)
 101	),
 102
 103	TP_fast_assign(
 104		__entry->result	= result;
 105	),
 106
 107	TP_printk("result 0x%llx", __entry->result)
 108);
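/*
 * Editor's note -- illustrative only.  kvm_hv_hypercall above is logged
 * when the Hyper-V hypercall is decoded and kvm_hv_hypercall_done when a
 * result is returned to the guest, so pairing the two events gives the
 * outcome (and latency) of each hypercall:
 *
 *	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
 *	...
 *	trace_kvm_hv_hypercall_done(result);
 */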
 109
 110/*
 111 * Tracepoint for Xen hypercall.
 112 */
 113TRACE_EVENT(kvm_xen_hypercall,
 114	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 115		 unsigned long a2, unsigned long a3, unsigned long a4,
 116		 unsigned long a5),
 117	    TP_ARGS(nr, a0, a1, a2, a3, a4, a5),
 118
 119	TP_STRUCT__entry(
 120		__field(unsigned long, nr)
 121		__field(unsigned long, a0)
 122		__field(unsigned long, a1)
 123		__field(unsigned long, a2)
 124		__field(unsigned long, a3)
 125		__field(unsigned long, a4)
 126		__field(unsigned long, a5)
 127	),
 128
 129	TP_fast_assign(
 130		__entry->nr = nr;
 131		__entry->a0 = a0;
 132		__entry->a1 = a1;
 133		__entry->a2 = a2;
 134		__entry->a3 = a3;
 135		__entry->a4 = a4;
  136		__entry->a5 = a5;
 137	),
 138
 139	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx",
 140		  __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 141		  __entry->a3, __entry->a4, __entry->a5)
 142);
 143
 144
 145
 146/*
 147 * Tracepoint for PIO.
 148 */
 149
 150#define KVM_PIO_IN   0
 151#define KVM_PIO_OUT  1
 152
 153TRACE_EVENT(kvm_pio,
 154	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 155		 unsigned int count, void *data),
 156	TP_ARGS(rw, port, size, count, data),
 157
 158	TP_STRUCT__entry(
 159		__field(	unsigned int, 	rw		)
 160		__field(	unsigned int, 	port		)
 161		__field(	unsigned int, 	size		)
 162		__field(	unsigned int,	count		)
 163		__field(	unsigned int,	val		)
 164	),
 165
 166	TP_fast_assign(
 167		__entry->rw		= rw;
 168		__entry->port		= port;
 169		__entry->size		= size;
 170		__entry->count		= count;
 171		if (size == 1)
 172			__entry->val	= *(unsigned char *)data;
 173		else if (size == 2)
 174			__entry->val	= *(unsigned short *)data;
 175		else
 176			__entry->val	= *(unsigned int *)data;
 177	),
 178
 179	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
 180		  __entry->rw ? "write" : "read",
 181		  __entry->port, __entry->size, __entry->count, __entry->val,
 182		  __entry->count > 1 ? "(...)" : "")
 183);
 184
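/*
 * Editor's note -- illustrative sketch, not part of the original header.
 * Compared with v3.15, this tracepoint also snapshots the first item of
 * the transferred data, so the PIO emulation path passes its data buffer
 * along, e.g.:
 *
 *	trace_kvm_pio(KVM_PIO_OUT, port, size, count, data);
 *
 * For count > 1 only the first element is recorded and the printed line
 * is suffixed with "(...)".
 */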
 185/*
 186 * Tracepoint for fast mmio.
 187 */
 188TRACE_EVENT(kvm_fast_mmio,
 189	TP_PROTO(u64 gpa),
 190	TP_ARGS(gpa),
 191
 192	TP_STRUCT__entry(
 193		__field(u64,	gpa)
 194	),
 195
 196	TP_fast_assign(
 197		__entry->gpa		= gpa;
 198	),
 199
 200	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
 201);
 202
 203/*
 204 * Tracepoint for cpuid.
 205 */
 206TRACE_EVENT(kvm_cpuid,
 207	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
 208		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
 209		 bool found, bool used_max_basic),
 210	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),
 211
 212	TP_STRUCT__entry(
 213		__field(	unsigned int,	function	)
 214		__field(	unsigned int,	index		)
 215		__field(	unsigned long,	rax		)
 216		__field(	unsigned long,	rbx		)
 217		__field(	unsigned long,	rcx		)
 218		__field(	unsigned long,	rdx		)
 219		__field(	bool,		found		)
 220		__field(	bool,		used_max_basic	)
 221	),
 222
 223	TP_fast_assign(
 224		__entry->function	= function;
 225		__entry->index		= index;
 226		__entry->rax		= rax;
 227		__entry->rbx		= rbx;
 228		__entry->rcx		= rcx;
 229		__entry->rdx		= rdx;
 230		__entry->found		= found;
 231		__entry->used_max_basic	= used_max_basic;
 232	),
 233
 234	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
 235		  __entry->function, __entry->index, __entry->rax,
 236		  __entry->rbx, __entry->rcx, __entry->rdx,
 237		  __entry->found ? "found" : "not found",
 238		  __entry->used_max_basic ? ", used max basic" : "")
 239);
 240
 241#define AREG(x) { APIC_##x, "APIC_" #x }
 242
 243#define kvm_trace_symbol_apic						    \
 244	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
 245	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
 246	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
 247	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
 248	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
 249	AREG(ECTRL)
 250/*
 251 * Tracepoint for apic access.
 252 */
 253TRACE_EVENT(kvm_apic,
 254	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
 255	TP_ARGS(rw, reg, val),
 256
 257	TP_STRUCT__entry(
 258		__field(	unsigned int,	rw		)
 259		__field(	unsigned int,	reg		)
 260		__field(	unsigned int,	val		)
 261	),
 262
 263	TP_fast_assign(
 264		__entry->rw		= rw;
 265		__entry->reg		= reg;
 266		__entry->val		= val;
 267	),
 268
 269	TP_printk("apic_%s %s = 0x%x",
 270		  __entry->rw ? "write" : "read",
 271		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
 272		  __entry->val)
 273);
 274
 275#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 276#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 277
 278#define KVM_ISA_VMX   1
 279#define KVM_ISA_SVM   2
 280
 281#define kvm_print_exit_reason(exit_reason, isa)				\
 282	(isa == KVM_ISA_VMX) ?						\
 283	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
 284	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
 285	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
 286	(isa == KVM_ISA_VMX) ?						\
 287	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""
 288
 289#define TRACE_EVENT_KVM_EXIT(name)					     \
 290TRACE_EVENT(name,							     \
 291	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),  \
 292	TP_ARGS(exit_reason, vcpu, isa),				     \
 293									     \
 294	TP_STRUCT__entry(						     \
 295		__field(	unsigned int,	exit_reason	)	     \
 296		__field(	unsigned long,	guest_rip	)	     \
 297		__field(	u32,	        isa             )	     \
 298		__field(	u64,	        info1           )	     \
 299		__field(	u64,	        info2           )	     \
 300		__field(	u32,	        intr_info	)	     \
 301		__field(	u32,	        error_code	)	     \
 302		__field(	unsigned int,	vcpu_id         )	     \
 303	),								     \
 304									     \
 305	TP_fast_assign(							     \
 306		__entry->exit_reason	= exit_reason;			     \
 307		__entry->guest_rip	= kvm_rip_read(vcpu);		     \
 308		__entry->isa            = isa;				     \
 309		__entry->vcpu_id        = vcpu->vcpu_id;		     \
 310		static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1,    \
 311					  &__entry->info2,		     \
 312					  &__entry->intr_info,		     \
 313					  &__entry->error_code);	     \
 314	),								     \
 315									     \
 316	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
 317		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x",	     \
 318		  __entry->vcpu_id,					     \
 319		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
 320		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
 321		  __entry->intr_info, __entry->error_code)		     \
 322)
 323
 324/*
 325 * Tracepoint for kvm guest exit:
 326 */
 327TRACE_EVENT_KVM_EXIT(kvm_exit);
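/*
 * Editor's note -- not part of the original header.  kvm_exit and
 * kvm_nested_vmexit (instantiated further down) now share one record
 * layout via the TRACE_EVENT_KVM_EXIT() macro above.
 * kvm_print_exit_reason() masks the low 16 bits for VMX (the reason
 * number, looked up in VMX_EXIT_REASONS) and prints any high bits
 * separately via VMX_EXIT_REASON_FLAGS, while SVM exit codes are looked
 * up directly in SVM_EXIT_REASONS.
 */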
 328
 329/*
 330 * Tracepoint for kvm interrupt injection:
 331 */
 332TRACE_EVENT(kvm_inj_virq,
 333	TP_PROTO(unsigned int irq),
 334	TP_ARGS(irq),
 335
 336	TP_STRUCT__entry(
 337		__field(	unsigned int,	irq		)
 338	),
 339
 340	TP_fast_assign(
 341		__entry->irq		= irq;
 342	),
 343
 344	TP_printk("irq %u", __entry->irq)
 345);
 346
 347#define EXS(x) { x##_VECTOR, "#" #x }
 348
 349#define kvm_trace_sym_exc						\
 350	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
 351	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
 352	EXS(MF), EXS(AC), EXS(MC)
 353
 354/*
 355 * Tracepoint for kvm interrupt injection:
 356 */
 357TRACE_EVENT(kvm_inj_exception,
 358	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
 359	TP_ARGS(exception, has_error, error_code),
 360
 361	TP_STRUCT__entry(
 362		__field(	u8,	exception	)
 363		__field(	u8,	has_error	)
 364		__field(	u32,	error_code	)
 365	),
 366
 367	TP_fast_assign(
 368		__entry->exception	= exception;
 369		__entry->has_error	= has_error;
 370		__entry->error_code	= error_code;
 371	),
 372
 373	TP_printk("%s (0x%x)",
 374		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
 375		  /* FIXME: don't print error_code if not present */
 376		  __entry->has_error ? __entry->error_code : 0)
 377);
 378
 379/*
 380 * Tracepoint for page fault.
 381 */
 382TRACE_EVENT(kvm_page_fault,
 383	TP_PROTO(unsigned long fault_address, unsigned int error_code),
 384	TP_ARGS(fault_address, error_code),
 385
 386	TP_STRUCT__entry(
 387		__field(	unsigned long,	fault_address	)
 388		__field(	unsigned int,	error_code	)
 389	),
 390
 391	TP_fast_assign(
 392		__entry->fault_address	= fault_address;
 393		__entry->error_code	= error_code;
 394	),
 395
 396	TP_printk("address %lx error_code %x",
 397		  __entry->fault_address, __entry->error_code)
 398);
 399
 400/*
 401 * Tracepoint for guest MSR access.
 402 */
 403TRACE_EVENT(kvm_msr,
 404	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
 405	TP_ARGS(write, ecx, data, exception),
 406
 407	TP_STRUCT__entry(
 408		__field(	unsigned,	write		)
 409		__field(	u32,		ecx		)
 410		__field(	u64,		data		)
 411		__field(	u8,		exception	)
 412	),
 413
 414	TP_fast_assign(
 415		__entry->write		= write;
 416		__entry->ecx		= ecx;
 417		__entry->data		= data;
 418		__entry->exception	= exception;
 419	),
 420
 421	TP_printk("msr_%s %x = 0x%llx%s",
 422		  __entry->write ? "write" : "read",
 423		  __entry->ecx, __entry->data,
 424		  __entry->exception ? " (#GP)" : "")
 425);
 426
 427#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
 428#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
 429#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
 430#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
 431
 432/*
 433 * Tracepoint for guest CR access.
 434 */
 435TRACE_EVENT(kvm_cr,
 436	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
 437	TP_ARGS(rw, cr, val),
 438
 439	TP_STRUCT__entry(
 440		__field(	unsigned int,	rw		)
 441		__field(	unsigned int,	cr		)
 442		__field(	unsigned long,	val		)
 443	),
 444
 445	TP_fast_assign(
 446		__entry->rw		= rw;
 447		__entry->cr		= cr;
 448		__entry->val		= val;
 449	),
 450
 451	TP_printk("cr_%s %x = 0x%lx",
 452		  __entry->rw ? "write" : "read",
 453		  __entry->cr, __entry->val)
 454);
 455
 456#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
 457#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
 458
 459TRACE_EVENT(kvm_pic_set_irq,
 460	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
 461	    TP_ARGS(chip, pin, elcr, imr, coalesced),
 462
 463	TP_STRUCT__entry(
 464		__field(	__u8,		chip		)
 465		__field(	__u8,		pin		)
 466		__field(	__u8,		elcr		)
 467		__field(	__u8,		imr		)
 468		__field(	bool,		coalesced	)
 469	),
 470
 471	TP_fast_assign(
 472		__entry->chip		= chip;
 473		__entry->pin		= pin;
 474		__entry->elcr		= elcr;
 475		__entry->imr		= imr;
 476		__entry->coalesced	= coalesced;
 477	),
 478
 479	TP_printk("chip %u pin %u (%s%s)%s",
 480		  __entry->chip, __entry->pin,
 481		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
 482		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
 483		  __entry->coalesced ? " (coalesced)" : "")
 484);
 485
 486#define kvm_apic_dst_shorthand		\
 487	{0x0, "dst"},			\
 488	{0x1, "self"},			\
 489	{0x2, "all"},			\
 490	{0x3, "all-but-self"}
 491
 492TRACE_EVENT(kvm_apic_ipi,
 493	    TP_PROTO(__u32 icr_low, __u32 dest_id),
 494	    TP_ARGS(icr_low, dest_id),
 495
 496	TP_STRUCT__entry(
 497		__field(	__u32,		icr_low		)
 498		__field(	__u32,		dest_id		)
 499	),
 500
 501	TP_fast_assign(
 502		__entry->icr_low	= icr_low;
 503		__entry->dest_id	= dest_id;
 504	),
 505
 506	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
 507		  __entry->dest_id, (u8)__entry->icr_low,
 508		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
 509				   kvm_deliver_mode),
 510		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
 511		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
 512		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
 513		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
 514				   kvm_apic_dst_shorthand))
 515);
 516
 517TRACE_EVENT(kvm_apic_accept_irq,
 518	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 519	    TP_ARGS(apicid, dm, tm, vec),
 520
 521	TP_STRUCT__entry(
 522		__field(	__u32,		apicid		)
 523		__field(	__u16,		dm		)
 524		__field(	__u16,		tm		)
 525		__field(	__u8,		vec		)
 526	),
 527
 528	TP_fast_assign(
 529		__entry->apicid		= apicid;
 530		__entry->dm		= dm;
 531		__entry->tm		= tm;
 532		__entry->vec		= vec;
 533	),
 534
 535	TP_printk("apicid %x vec %u (%s|%s)",
 536		  __entry->apicid, __entry->vec,
 537		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
 538		  __entry->tm ? "level" : "edge")
 539);
 540
 541TRACE_EVENT(kvm_eoi,
 542	    TP_PROTO(struct kvm_lapic *apic, int vector),
 543	    TP_ARGS(apic, vector),
 544
 545	TP_STRUCT__entry(
 546		__field(	__u32,		apicid		)
 547		__field(	int,		vector		)
 548	),
 549
 550	TP_fast_assign(
 551		__entry->apicid		= apic->vcpu->vcpu_id;
 552		__entry->vector		= vector;
 553	),
 554
 555	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 556);
 557
 558TRACE_EVENT(kvm_pv_eoi,
 559	    TP_PROTO(struct kvm_lapic *apic, int vector),
 560	    TP_ARGS(apic, vector),
 561
 562	TP_STRUCT__entry(
 563		__field(	__u32,		apicid		)
 564		__field(	int,		vector		)
 565	),
 566
 567	TP_fast_assign(
 568		__entry->apicid		= apic->vcpu->vcpu_id;
 569		__entry->vector		= vector;
 570	),
 571
 572	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 573);
 574
 575/*
 576 * Tracepoint for nested VMRUN
 577 */
 578TRACE_EVENT(kvm_nested_vmrun,
 579	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
 580		     __u32 event_inj, bool npt),
 581	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
 582
 583	TP_STRUCT__entry(
 584		__field(	__u64,		rip		)
 585		__field(	__u64,		vmcb		)
 586		__field(	__u64,		nested_rip	)
 587		__field(	__u32,		int_ctl		)
 588		__field(	__u32,		event_inj	)
 589		__field(	bool,		npt		)
 590	),
 591
 592	TP_fast_assign(
 593		__entry->rip		= rip;
 594		__entry->vmcb		= vmcb;
 595		__entry->nested_rip	= nested_rip;
 596		__entry->int_ctl	= int_ctl;
 597		__entry->event_inj	= event_inj;
 598		__entry->npt		= npt;
 599	),
 600
 601	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
 602		  "event_inj: 0x%08x npt: %s",
 603		__entry->rip, __entry->vmcb, __entry->nested_rip,
 604		__entry->int_ctl, __entry->event_inj,
 605		__entry->npt ? "on" : "off")
 606);
 607
 608TRACE_EVENT(kvm_nested_intercepts,
 609	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
 610		     __u32 intercept1, __u32 intercept2, __u32 intercept3),
 611	    TP_ARGS(cr_read, cr_write, exceptions, intercept1,
 612		    intercept2, intercept3),
 613
 614	TP_STRUCT__entry(
 615		__field(	__u16,		cr_read		)
 616		__field(	__u16,		cr_write	)
 617		__field(	__u32,		exceptions	)
 618		__field(	__u32,		intercept1	)
 619		__field(	__u32,		intercept2	)
 620		__field(	__u32,		intercept3	)
 621	),
 622
 623	TP_fast_assign(
 624		__entry->cr_read	= cr_read;
 625		__entry->cr_write	= cr_write;
 626		__entry->exceptions	= exceptions;
 627		__entry->intercept1	= intercept1;
 628		__entry->intercept2	= intercept2;
 629		__entry->intercept3	= intercept3;
 630	),
 631
 632	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
 633		  "intercepts: %08x %08x %08x",
 634		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
 635		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
 636);
 637/*
 638 * Tracepoint for #VMEXIT while nested
 639 */
 640TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);
 641
 642/*
 643 * Tracepoint for #VMEXIT reinjected to the guest
 644 */
 645TRACE_EVENT(kvm_nested_vmexit_inject,
 646	    TP_PROTO(__u32 exit_code,
 647		     __u64 exit_info1, __u64 exit_info2,
 648		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
 649	    TP_ARGS(exit_code, exit_info1, exit_info2,
 650		    exit_int_info, exit_int_info_err, isa),
 651
 652	TP_STRUCT__entry(
 653		__field(	__u32,		exit_code		)
 654		__field(	__u64,		exit_info1		)
 655		__field(	__u64,		exit_info2		)
 656		__field(	__u32,		exit_int_info		)
 657		__field(	__u32,		exit_int_info_err	)
 658		__field(	__u32,		isa			)
 659	),
 660
 661	TP_fast_assign(
 662		__entry->exit_code		= exit_code;
 663		__entry->exit_info1		= exit_info1;
 664		__entry->exit_info2		= exit_info2;
 665		__entry->exit_int_info		= exit_int_info;
 666		__entry->exit_int_info_err	= exit_int_info_err;
 667		__entry->isa			= isa;
 668	),
 669
 670	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
 671		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
 672		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
 673		  __entry->exit_info1, __entry->exit_info2,
 674		  __entry->exit_int_info, __entry->exit_int_info_err)
 675);
 676
 677/*
 678 * Tracepoint for nested #vmexit because of interrupt pending
 679 */
 680TRACE_EVENT(kvm_nested_intr_vmexit,
 681	    TP_PROTO(__u64 rip),
 682	    TP_ARGS(rip),
 683
 684	TP_STRUCT__entry(
 685		__field(	__u64,	rip	)
 686	),
 687
 688	TP_fast_assign(
 689		__entry->rip	=	rip
 690	),
 691
 692	TP_printk("rip: 0x%016llx", __entry->rip)
 693);
 694
 695/*
  696 * Tracepoint for INVLPGA.
 697 */
 698TRACE_EVENT(kvm_invlpga,
 699	    TP_PROTO(__u64 rip, int asid, u64 address),
 700	    TP_ARGS(rip, asid, address),
 701
 702	TP_STRUCT__entry(
 703		__field(	__u64,	rip	)
 704		__field(	int,	asid	)
 705		__field(	__u64,	address	)
 706	),
 707
 708	TP_fast_assign(
 709		__entry->rip		=	rip;
 710		__entry->asid		=	asid;
 711		__entry->address	=	address;
 712	),
 713
 714	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
 715		  __entry->rip, __entry->asid, __entry->address)
 716);
 717
 718/*
  719 * Tracepoint for SKINIT.
 720 */
 721TRACE_EVENT(kvm_skinit,
 722	    TP_PROTO(__u64 rip, __u32 slb),
 723	    TP_ARGS(rip, slb),
 724
 725	TP_STRUCT__entry(
 726		__field(	__u64,	rip	)
 727		__field(	__u32,	slb	)
 728	),
 729
 730	TP_fast_assign(
 731		__entry->rip		=	rip;
 732		__entry->slb		=	slb;
 733	),
 734
 735	TP_printk("rip: 0x%016llx slb: 0x%08x",
 736		  __entry->rip, __entry->slb)
 737);
 738
 739#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
 740#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
 741#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
 742#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
 743
 744#define kvm_trace_symbol_emul_flags	                  \
 745	{ 0,   			    "real" },		  \
 746	{ KVM_EMUL_INSN_F_CR0_PE			  \
 747	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
 748	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
 749	{ KVM_EMUL_INSN_F_CR0_PE			  \
 750	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
 751	{ KVM_EMUL_INSN_F_CR0_PE			  \
 752	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
 753
 754#define kei_decode_mode(mode) ({			\
 755	u8 flags = 0xff;				\
 756	switch (mode) {					\
 757	case X86EMUL_MODE_REAL:				\
 758		flags = 0;				\
 759		break;					\
 760	case X86EMUL_MODE_VM86:				\
 761		flags = KVM_EMUL_INSN_F_EFL_VM;		\
 762		break;					\
 763	case X86EMUL_MODE_PROT16:			\
 764		flags = KVM_EMUL_INSN_F_CR0_PE;		\
 765		break;					\
 766	case X86EMUL_MODE_PROT32:			\
 767		flags = KVM_EMUL_INSN_F_CR0_PE		\
 768			| KVM_EMUL_INSN_F_CS_D;		\
 769		break;					\
 770	case X86EMUL_MODE_PROT64:			\
 771		flags = KVM_EMUL_INSN_F_CR0_PE		\
 772			| KVM_EMUL_INSN_F_CS_L;		\
 773		break;					\
 774	}						\
 775	flags;						\
 776	})
 777
 778TRACE_EVENT(kvm_emulate_insn,
 779	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
 780	TP_ARGS(vcpu, failed),
 781
 782	TP_STRUCT__entry(
 783		__field(    __u64, rip                       )
 784		__field(    __u32, csbase                    )
 785		__field(    __u8,  len                       )
 786		__array(    __u8,  insn,    15	             )
 787		__field(    __u8,  flags       	   	     )
 788		__field(    __u8,  failed                    )
 789		),
 790
 791	TP_fast_assign(
 792		__entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
 793		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
 794			       - vcpu->arch.emulate_ctxt->fetch.data;
 795		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
 796		memcpy(__entry->insn,
 797		       vcpu->arch.emulate_ctxt->fetch.data,
 798		       15);
 799		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
 800		__entry->failed = failed;
 801		),
 802
 803	TP_printk("%x:%llx:%s (%s)%s",
 804		  __entry->csbase, __entry->rip,
 805		  __print_hex(__entry->insn, __entry->len),
 806		  __print_symbolic(__entry->flags,
 807				   kvm_trace_symbol_emul_flags),
 808		  __entry->failed ? " failed" : ""
 809		)
 810	);
 811
 812#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
 813#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
 814
 815TRACE_EVENT(
 816	vcpu_match_mmio,
 817	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
 818	TP_ARGS(gva, gpa, write, gpa_match),
 819
 820	TP_STRUCT__entry(
 821		__field(gva_t, gva)
 822		__field(gpa_t, gpa)
 823		__field(bool, write)
 824		__field(bool, gpa_match)
 825		),
 826
 827	TP_fast_assign(
 828		__entry->gva = gva;
 829		__entry->gpa = gpa;
 830		__entry->write = write;
 831		__entry->gpa_match = gpa_match
 832		),
 833
 834	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
 835		  __entry->write ? "Write" : "Read",
 836		  __entry->gpa_match ? "GPA" : "GVA")
 837);
 838
 839TRACE_EVENT(kvm_write_tsc_offset,
 840	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
 841		 __u64 next_tsc_offset),
 842	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
 843
 844	TP_STRUCT__entry(
 845		__field( unsigned int,	vcpu_id				)
 846		__field(	__u64,	previous_tsc_offset		)
 847		__field(	__u64,	next_tsc_offset			)
 848	),
 849
 850	TP_fast_assign(
 851		__entry->vcpu_id		= vcpu_id;
 852		__entry->previous_tsc_offset	= previous_tsc_offset;
 853		__entry->next_tsc_offset	= next_tsc_offset;
 854	),
 855
 856	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
 857		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
 858);
 859
 860#ifdef CONFIG_X86_64
 861
 862#define host_clocks					\
 863	{VDSO_CLOCKMODE_NONE, "none"},			\
 864	{VDSO_CLOCKMODE_TSC,  "tsc"}			\
 865
 866TRACE_EVENT(kvm_update_master_clock,
 867	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
 868	TP_ARGS(use_master_clock, host_clock, offset_matched),
 869
 870	TP_STRUCT__entry(
 871		__field(		bool,	use_master_clock	)
 872		__field(	unsigned int,	host_clock		)
 873		__field(		bool,	offset_matched		)
 874	),
 875
 876	TP_fast_assign(
 877		__entry->use_master_clock	= use_master_clock;
 878		__entry->host_clock		= host_clock;
 879		__entry->offset_matched		= offset_matched;
 880	),
 881
 882	TP_printk("masterclock %d hostclock %s offsetmatched %u",
 883		  __entry->use_master_clock,
 884		  __print_symbolic(__entry->host_clock, host_clocks),
 885		  __entry->offset_matched)
 886);
 887
 888TRACE_EVENT(kvm_track_tsc,
 889	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
 890		 unsigned int online_vcpus, bool use_master_clock,
 891		 unsigned int host_clock),
 892	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
 893		host_clock),
 894
 895	TP_STRUCT__entry(
 896		__field(	unsigned int,	vcpu_id			)
 897		__field(	unsigned int,	nr_vcpus_matched_tsc	)
 898		__field(	unsigned int,	online_vcpus		)
 899		__field(	bool,		use_master_clock	)
 900		__field(	unsigned int,	host_clock		)
 901	),
 902
 903	TP_fast_assign(
 904		__entry->vcpu_id		= vcpu_id;
 905		__entry->nr_vcpus_matched_tsc	= nr_matched;
 906		__entry->online_vcpus		= online_vcpus;
 907		__entry->use_master_clock	= use_master_clock;
 908		__entry->host_clock		= host_clock;
 909	),
 910
 911	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
 912		  " hostclock %s",
 913		  __entry->vcpu_id, __entry->use_master_clock,
 914		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
 915		  __print_symbolic(__entry->host_clock, host_clocks))
 916);
 917
 918#endif /* CONFIG_X86_64 */
 919
 920/*
 921 * Tracepoint for PML full VMEXIT.
 922 */
 923TRACE_EVENT(kvm_pml_full,
 924	TP_PROTO(unsigned int vcpu_id),
 925	TP_ARGS(vcpu_id),
 926
 927	TP_STRUCT__entry(
 928		__field(	unsigned int,	vcpu_id			)
 929	),
 930
 931	TP_fast_assign(
 932		__entry->vcpu_id		= vcpu_id;
 933	),
 934
 935	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
 936);
 937
 938TRACE_EVENT(kvm_ple_window_update,
 939	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
 940	TP_ARGS(vcpu_id, new, old),
 941
 942	TP_STRUCT__entry(
 943		__field(        unsigned int,   vcpu_id         )
 944		__field(        unsigned int,       new         )
 945		__field(        unsigned int,       old         )
 946	),
 947
 948	TP_fast_assign(
 949		__entry->vcpu_id        = vcpu_id;
 950		__entry->new            = new;
 951		__entry->old            = old;
 952	),
 953
 954	TP_printk("vcpu %u old %u new %u (%s)",
 955	          __entry->vcpu_id, __entry->old, __entry->new,
 956		  __entry->old < __entry->new ? "growed" : "shrinked")
 957);
 958
 959TRACE_EVENT(kvm_pvclock_update,
 960	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
 961	TP_ARGS(vcpu_id, pvclock),
 962
 963	TP_STRUCT__entry(
 964		__field(	unsigned int,	vcpu_id			)
 965		__field(	__u32,		version			)
 966		__field(	__u64,		tsc_timestamp		)
 967		__field(	__u64,		system_time		)
 968		__field(	__u32,		tsc_to_system_mul	)
 969		__field(	__s8,		tsc_shift		)
 970		__field(	__u8,		flags			)
 971	),
 972
 973	TP_fast_assign(
 974		__entry->vcpu_id	   = vcpu_id;
 975		__entry->version	   = pvclock->version;
 976		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
 977		__entry->system_time	   = pvclock->system_time;
 978		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
 979		__entry->tsc_shift	   = pvclock->tsc_shift;
 980		__entry->flags		   = pvclock->flags;
 981	),
 982
 983	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
 984		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
 985		  "flags 0x%x }",
 986		  __entry->vcpu_id,
 987		  __entry->version,
 988		  __entry->tsc_timestamp,
 989		  __entry->system_time,
 990		  __entry->tsc_to_system_mul,
 991		  __entry->tsc_shift,
 992		  __entry->flags)
 993);
 994
 995TRACE_EVENT(kvm_wait_lapic_expire,
 996	TP_PROTO(unsigned int vcpu_id, s64 delta),
 997	TP_ARGS(vcpu_id, delta),
 998
 999	TP_STRUCT__entry(
1000		__field(	unsigned int,	vcpu_id		)
1001		__field(	s64,		delta		)
1002	),
1003
1004	TP_fast_assign(
1005		__entry->vcpu_id	   = vcpu_id;
1006		__entry->delta             = delta;
1007	),
1008
1009	TP_printk("vcpu %u: delta %lld (%s)",
1010		  __entry->vcpu_id,
1011		  __entry->delta,
1012		  __entry->delta < 0 ? "early" : "late")
1013);
1014
1015TRACE_EVENT(kvm_smm_transition,
1016	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
1017	TP_ARGS(vcpu_id, smbase, entering),
1018
1019	TP_STRUCT__entry(
1020		__field(	unsigned int,	vcpu_id		)
1021		__field(	u64,		smbase		)
1022		__field(	bool,		entering	)
1023	),
1024
1025	TP_fast_assign(
1026		__entry->vcpu_id	= vcpu_id;
1027		__entry->smbase		= smbase;
1028		__entry->entering	= entering;
1029	),
1030
1031	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
1032		  __entry->vcpu_id,
1033		  __entry->entering ? "entering" : "leaving",
1034		  __entry->smbase)
1035);
1036
1037/*
1038 * Tracepoint for VT-d posted-interrupts.
1039 */
1040TRACE_EVENT(kvm_pi_irte_update,
1041	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
1042		 unsigned int gsi, unsigned int gvec,
1043		 u64 pi_desc_addr, bool set),
1044	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
1045
1046	TP_STRUCT__entry(
1047		__field(	unsigned int,	host_irq	)
1048		__field(	unsigned int,	vcpu_id		)
1049		__field(	unsigned int,	gsi		)
1050		__field(	unsigned int,	gvec		)
1051		__field(	u64,		pi_desc_addr	)
1052		__field(	bool,		set		)
1053	),
1054
1055	TP_fast_assign(
1056		__entry->host_irq	= host_irq;
1057		__entry->vcpu_id	= vcpu_id;
1058		__entry->gsi		= gsi;
1059		__entry->gvec		= gvec;
1060		__entry->pi_desc_addr	= pi_desc_addr;
1061		__entry->set		= set;
1062	),
1063
1064	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
1065		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
1066		  __entry->set ? "enabled and being updated" : "disabled",
1067		  __entry->host_irq,
1068		  __entry->vcpu_id,
1069		  __entry->gsi,
1070		  __entry->gvec,
1071		  __entry->pi_desc_addr)
1072);
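
/*
 * Usage sketch (illustrative): an IRTE-update path for VT-d posted
 * interrupts would emit the event with the host IRQ, the target vCPU, the
 * guest GSI/vector and the posted-interrupt descriptor address; every name
 * other than the tracepoint itself is a placeholder here.
 *
 *	trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, guest_gsi,
 *				 guest_vector, pi_desc_pa, true);
 */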
1073
1074/*
1075 * Tracepoint for kvm_hv_notify_acked_sint.
1076 */
1077TRACE_EVENT(kvm_hv_notify_acked_sint,
1078	TP_PROTO(int vcpu_id, u32 sint),
1079	TP_ARGS(vcpu_id, sint),
1080
1081	TP_STRUCT__entry(
1082		__field(int, vcpu_id)
1083		__field(u32, sint)
1084	),
1085
1086	TP_fast_assign(
1087		__entry->vcpu_id = vcpu_id;
1088		__entry->sint = sint;
1089	),
1090
1091	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
1092);
1093
1094/*
1095 * Tracepoint for synic_set_irq.
1096 */
1097TRACE_EVENT(kvm_hv_synic_set_irq,
1098	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
1099	TP_ARGS(vcpu_id, sint, vector, ret),
1100
1101	TP_STRUCT__entry(
1102		__field(int, vcpu_id)
1103		__field(u32, sint)
1104		__field(int, vector)
1105		__field(int, ret)
1106	),
1107
1108	TP_fast_assign(
1109		__entry->vcpu_id = vcpu_id;
1110		__entry->sint = sint;
1111		__entry->vector = vector;
1112		__entry->ret = ret;
1113	),
1114
1115	TP_printk("vcpu_id %d sint %u vector %d ret %d",
1116		  __entry->vcpu_id, __entry->sint, __entry->vector,
1117		  __entry->ret)
1118);
1119
1120/*
1121 * Tracepoint for kvm_hv_synic_send_eoi.
1122 */
1123TRACE_EVENT(kvm_hv_synic_send_eoi,
1124	TP_PROTO(int vcpu_id, int vector),
1125	TP_ARGS(vcpu_id, vector),
1126
1127	TP_STRUCT__entry(
1128		__field(int, vcpu_id)
1130		__field(int, vector)
1132	),
1133
1134	TP_fast_assign(
1135		__entry->vcpu_id = vcpu_id;
1136		__entry->vector	= vector;
1137	),
1138
1139	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
1140);
1141
1142/*
1143 * Tracepoint for synic_set_msr.
1144 */
1145TRACE_EVENT(kvm_hv_synic_set_msr,
1146	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
1147	TP_ARGS(vcpu_id, msr, data, host),
1148
1149	TP_STRUCT__entry(
1150		__field(int, vcpu_id)
1151		__field(u32, msr)
1152		__field(u64, data)
1153		__field(bool, host)
1154	),
1155
1156	TP_fast_assign(
1157		__entry->vcpu_id = vcpu_id;
1158		__entry->msr = msr;
1159		__entry->data = data;
1160		__entry->host = host;
1161	),
1162
1163	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
1164		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
1165);
1166
1167/*
1168 * Tracepoint for stimer_set_config.
1169 */
1170TRACE_EVENT(kvm_hv_stimer_set_config,
1171	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
1172	TP_ARGS(vcpu_id, timer_index, config, host),
1173
1174	TP_STRUCT__entry(
1175		__field(int, vcpu_id)
1176		__field(int, timer_index)
1177		__field(u64, config)
1178		__field(bool, host)
1179	),
1180
1181	TP_fast_assign(
1182		__entry->vcpu_id = vcpu_id;
1183		__entry->timer_index = timer_index;
1184		__entry->config = config;
1185		__entry->host = host;
1186	),
1187
1188	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
1189		  __entry->vcpu_id, __entry->timer_index, __entry->config,
1190		  __entry->host)
1191);
1192
1193/*
1194 * Tracepoint for stimer_set_count.
1195 */
1196TRACE_EVENT(kvm_hv_stimer_set_count,
1197	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
1198	TP_ARGS(vcpu_id, timer_index, count, host),
1199
1200	TP_STRUCT__entry(
1201		__field(int, vcpu_id)
1202		__field(int, timer_index)
1203		__field(u64, count)
1204		__field(bool, host)
1205	),
1206
1207	TP_fast_assign(
1208		__entry->vcpu_id = vcpu_id;
1209		__entry->timer_index = timer_index;
1210		__entry->count = count;
1211		__entry->host = host;
1212	),
1213
1214	TP_printk("vcpu_id %d timer %d count %llu host %d",
1215		  __entry->vcpu_id, __entry->timer_index, __entry->count,
1216		  __entry->host)
1217);
1218
1219/*
1220 * Tracepoint for stimer_start() (periodic timer case).
1221 */
1222TRACE_EVENT(kvm_hv_stimer_start_periodic,
1223	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
1224	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),
1225
1226	TP_STRUCT__entry(
1227		__field(int, vcpu_id)
1228		__field(int, timer_index)
1229		__field(u64, time_now)
1230		__field(u64, exp_time)
1231	),
1232
1233	TP_fast_assign(
1234		__entry->vcpu_id = vcpu_id;
1235		__entry->timer_index = timer_index;
1236		__entry->time_now = time_now;
1237		__entry->exp_time = exp_time;
1238	),
1239
1240	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
1241		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1242		  __entry->exp_time)
1243);
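
/*
 * Interpretation sketch (an assumption based on the argument names): for a
 * periodic synthetic timer, exp_time is the next expiration on the Hyper-V
 * reference-time scale, i.e. roughly the current reference time plus the
 * configured period:
 *
 *	u64 exp_time = time_now + period;                 // placeholder names
 *	trace_kvm_hv_stimer_start_periodic(vcpu_id, timer_index,
 *					   time_now, exp_time);
 */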
1244
1245/*
1246 * Tracepoint for stimer_start() (one-shot timer case).
1247 */
1248TRACE_EVENT(kvm_hv_stimer_start_one_shot,
1249	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
1250	TP_ARGS(vcpu_id, timer_index, time_now, count),
1251
1252	TP_STRUCT__entry(
1253		__field(int, vcpu_id)
1254		__field(int, timer_index)
1255		__field(u64, time_now)
1256		__field(u64, count)
1257	),
1258
1259	TP_fast_assign(
1260		__entry->vcpu_id = vcpu_id;
1261		__entry->timer_index = timer_index;
1262		__entry->time_now = time_now;
1263		__entry->count = count;
1264	),
1265
1266	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
1267		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1268		  __entry->count)
1269);
1270
1271/*
1272 * Tracepoint for stimer_timer_callback.
1273 */
1274TRACE_EVENT(kvm_hv_stimer_callback,
1275	TP_PROTO(int vcpu_id, int timer_index),
1276	TP_ARGS(vcpu_id, timer_index),
1277
1278	TP_STRUCT__entry(
1279		__field(int, vcpu_id)
1280		__field(int, timer_index)
1281	),
1282
1283	TP_fast_assign(
1284		__entry->vcpu_id = vcpu_id;
1285		__entry->timer_index = timer_index;
1286	),
1287
1288	TP_printk("vcpu_id %d timer %d",
1289		  __entry->vcpu_id, __entry->timer_index)
1290);
1291
1292/*
1293 * Tracepoint for stimer_expiration.
1294 */
1295TRACE_EVENT(kvm_hv_stimer_expiration,
1296	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
1297	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
1298
1299	TP_STRUCT__entry(
1300		__field(int, vcpu_id)
1301		__field(int, timer_index)
1302		__field(int, direct)
1303		__field(int, msg_send_result)
1304	),
1305
1306	TP_fast_assign(
1307		__entry->vcpu_id = vcpu_id;
1308		__entry->timer_index = timer_index;
1309		__entry->direct = direct;
1310		__entry->msg_send_result = msg_send_result;
1311	),
1312
1313	TP_printk("vcpu_id %d timer %d direct %d send result %d",
1314		  __entry->vcpu_id, __entry->timer_index,
1315		  __entry->direct, __entry->msg_send_result)
1316);
1317
1318/*
1319 * Tracepoint for stimer_cleanup.
1320 */
1321TRACE_EVENT(kvm_hv_stimer_cleanup,
1322	TP_PROTO(int vcpu_id, int timer_index),
1323	TP_ARGS(vcpu_id, timer_index),
1324
1325	TP_STRUCT__entry(
1326		__field(int, vcpu_id)
1327		__field(int, timer_index)
1328	),
1329
1330	TP_fast_assign(
1331		__entry->vcpu_id = vcpu_id;
1332		__entry->timer_index = timer_index;
1333	),
1334
1335	TP_printk("vcpu_id %d timer %d",
1336		  __entry->vcpu_id, __entry->timer_index)
1337);
1338
1339TRACE_EVENT(kvm_apicv_update_request,
1340	    TP_PROTO(bool activate, unsigned long bit),
1341	    TP_ARGS(activate, bit),
1342
1343	TP_STRUCT__entry(
1344		__field(bool, activate)
1345		__field(unsigned long, bit)
1346	),
1347
1348	TP_fast_assign(
1349		__entry->activate = activate;
1350		__entry->bit = bit;
1351	),
1352
1353	TP_printk("%s bit=%lu",
1354		  __entry->activate ? "activate" : "deactivate",
1355		  __entry->bit)
1356);
1357
1358/*
1359 * Tracepoint for AMD AVIC
1360 */
1361TRACE_EVENT(kvm_avic_incomplete_ipi,
1362	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
1363	    TP_ARGS(vcpu, icrh, icrl, id, index),
1364
1365	TP_STRUCT__entry(
1366		__field(u32, vcpu)
1367		__field(u32, icrh)
1368		__field(u32, icrl)
1369		__field(u32, id)
1370		__field(u32, index)
1371	),
1372
1373	TP_fast_assign(
1374		__entry->vcpu = vcpu;
1375		__entry->icrh = icrh;
1376		__entry->icrl = icrl;
1377		__entry->id = id;
1378		__entry->index = index;
1379	),
1380
1381	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
1382		  __entry->vcpu, __entry->icrh, __entry->icrl,
1383		  __entry->id, __entry->index)
1384);
1385
1386TRACE_EVENT(kvm_avic_unaccelerated_access,
1387	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
1388	    TP_ARGS(vcpu, offset, ft, rw, vec),
1389
1390	TP_STRUCT__entry(
1391		__field(u32, vcpu)
1392		__field(u32, offset)
1393		__field(bool, ft)
1394		__field(bool, rw)
1395		__field(u32, vec)
1396	),
1397
1398	TP_fast_assign(
1399		__entry->vcpu = vcpu;
1400		__entry->offset = offset;
1401		__entry->ft = ft;
1402		__entry->rw = rw;
1403		__entry->vec = vec;
1404	),
1405
1406	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
1407		  __entry->vcpu,
1408		  __entry->offset,
1409		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
1410		  __entry->ft ? "trap" : "fault",
1411		  __entry->rw ? "write" : "read",
1412		  __entry->vec)
1413);
1414
1415TRACE_EVENT(kvm_avic_ga_log,
1416	    TP_PROTO(u32 vmid, u32 vcpuid),
1417	    TP_ARGS(vmid, vcpuid),
1418
1419	TP_STRUCT__entry(
1420		__field(u32, vmid)
1421		__field(u32, vcpuid)
1422	),
1423
1424	TP_fast_assign(
1425		__entry->vmid = vmid;
1426		__entry->vcpuid = vcpuid;
1427	),
1428
1429	TP_printk("vmid=%u, vcpuid=%u",
1430		  __entry->vmid, __entry->vcpuid)
1431);
1432
1433TRACE_EVENT(kvm_hv_timer_state,
1434	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
1435	TP_ARGS(vcpu_id, hv_timer_in_use),
1436	TP_STRUCT__entry(
1437		__field(unsigned int, vcpu_id)
1438		__field(unsigned int, hv_timer_in_use)
1439	),
1440	TP_fast_assign(
1441		__entry->vcpu_id = vcpu_id;
1442		__entry->hv_timer_in_use = hv_timer_in_use;
1443	),
1444	TP_printk("vcpu_id %x hv_timer %x",
1445		  __entry->vcpu_id,
1446		  __entry->hv_timer_in_use)
1447);
1448
1449/*
1450 * Tracepoint for kvm_hv_flush_tlb.
1451 */
1452TRACE_EVENT(kvm_hv_flush_tlb,
1453	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
1454	TP_ARGS(processor_mask, address_space, flags),
1455
1456	TP_STRUCT__entry(
1457		__field(u64, processor_mask)
1458		__field(u64, address_space)
1459		__field(u64, flags)
1460	),
1461
1462	TP_fast_assign(
1463		__entry->processor_mask = processor_mask;
1464		__entry->address_space = address_space;
1465		__entry->flags = flags;
1466	),
1467
1468	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
1469		  __entry->processor_mask, __entry->address_space,
1470		  __entry->flags)
1471);
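
/*
 * Usage sketch (illustrative): a Hyper-V TLB-flush hypercall handler would
 * log the request before performing the flush, passing the fields straight
 * from the hypercall input; "flush" is a placeholder for that input block.
 *
 *	trace_kvm_hv_flush_tlb(flush.processor_mask, flush.address_space,
 *			       flush.flags);
 */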
1472
1473/*
1474 * Tracepoint for kvm_hv_flush_tlb_ex.
1475 */
1476TRACE_EVENT(kvm_hv_flush_tlb_ex,
1477	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
1478	TP_ARGS(valid_bank_mask, format, address_space, flags),
1479
1480	TP_STRUCT__entry(
1481		__field(u64, valid_bank_mask)
1482		__field(u64, format)
1483		__field(u64, address_space)
1484		__field(u64, flags)
1485	),
1486
1487	TP_fast_assign(
1488		__entry->valid_bank_mask = valid_bank_mask;
1489		__entry->format = format;
1490		__entry->address_space = address_space;
1491		__entry->flags = flags;
1492	),
1493
1494	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
1495		  "address_space 0x%llx flags 0x%llx",
1496		  __entry->valid_bank_mask, __entry->format,
1497		  __entry->address_space, __entry->flags)
1498);
1499
1500/*
1501 * Tracepoints for kvm_hv_send_ipi.
1502 */
1503TRACE_EVENT(kvm_hv_send_ipi,
1504	TP_PROTO(u32 vector, u64 processor_mask),
1505	TP_ARGS(vector, processor_mask),
1506
1507	TP_STRUCT__entry(
1508		__field(u32, vector)
1509		__field(u64, processor_mask)
1510	),
1511
1512	TP_fast_assign(
1513		__entry->vector = vector;
1514		__entry->processor_mask = processor_mask;
1515	),
1516
1517	TP_printk("vector %x processor_mask 0x%llx",
1518		  __entry->vector, __entry->processor_mask)
1519);
1520
1521TRACE_EVENT(kvm_hv_send_ipi_ex,
1522	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
1523	TP_ARGS(vector, format, valid_bank_mask),
1524
1525	TP_STRUCT__entry(
1526		__field(u32, vector)
1527		__field(u64, format)
1528		__field(u64, valid_bank_mask)
1529	),
1530
1531	TP_fast_assign(
1532		__entry->vector = vector;
1533		__entry->format = format;
1534		__entry->valid_bank_mask = valid_bank_mask;
1535	),
1536
1537	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
1538		  __entry->vector, __entry->format,
1539		  __entry->valid_bank_mask)
1540);
1541
1542TRACE_EVENT(kvm_pv_tlb_flush,
1543	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
1544	TP_ARGS(vcpu_id, need_flush_tlb),
1545
1546	TP_STRUCT__entry(
1547		__field(	unsigned int,	vcpu_id		)
1548		__field(	bool,	need_flush_tlb		)
1549	),
1550
1551	TP_fast_assign(
1552		__entry->vcpu_id	= vcpu_id;
1553		__entry->need_flush_tlb = need_flush_tlb;
1554	),
1555
1556	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
1557		__entry->need_flush_tlb ? "true" : "false")
1558);
1559
1560/*
1561 * Tracepoint for failed nested VMX VM-Enter.
1562 */
1563TRACE_EVENT(kvm_nested_vmenter_failed,
1564	TP_PROTO(const char *msg, u32 err),
1565	TP_ARGS(msg, err),
1566
1567	TP_STRUCT__entry(
1568		__string(msg, msg)
1569		__field(u32, err)
1570	),
1571
1572	TP_fast_assign(
1573		__assign_str(msg, msg);
1574		__entry->err = err;
1575	),
1576
1577	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
1578		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
1579);
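
/*
 * Usage sketch (illustrative): because msg is declared with __string() and
 * copied with __assign_str(), any NUL-terminated string may be passed and it
 * is stored in the ring buffer; an err of 0 suppresses the symbolic
 * VM-instruction-error suffix in the printed output. The message below is an
 * arbitrary example string.
 *
 *	trace_kvm_nested_vmenter_failed("vmcs12 guest state invalid: ", 0);
 */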
1580
1581/*
1582 * Tracepoint for syndbg_set_msr.
1583 */
1584TRACE_EVENT(kvm_hv_syndbg_set_msr,
1585	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1586	TP_ARGS(vcpu_id, vp_index, msr, data),
1587
1588	TP_STRUCT__entry(
1589		__field(int, vcpu_id)
1590		__field(u32, vp_index)
1591		__field(u32, msr)
1592		__field(u64, data)
1593	),
1594
1595	TP_fast_assign(
1596		__entry->vcpu_id = vcpu_id;
1597		__entry->vp_index = vp_index;
1598		__entry->msr = msr;
1599		__entry->data = data;
1600	),
1601
1602	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1603		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1604		  __entry->data)
1605);
1606
1607/*
1608 * Tracepoint for syndbg_get_msr.
1609 */
1610TRACE_EVENT(kvm_hv_syndbg_get_msr,
1611	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1612	TP_ARGS(vcpu_id, vp_index, msr, data),
1613
1614	TP_STRUCT__entry(
1615		__field(int, vcpu_id)
1616		__field(u32, vp_index)
1617		__field(u32, msr)
1618		__field(u64, data)
1619	),
1620
1621	TP_fast_assign(
1622		__entry->vcpu_id = vcpu_id;
1623		__entry->vp_index = vp_index;
1624		__entry->msr = msr;
1625		__entry->data = data;
1626	),
1627
1628	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1629		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1630		  __entry->data)
1631);
1632
1633/*
1634 * Tracepoint for the start of VMGEXIT processing
1635 */
1636TRACE_EVENT(kvm_vmgexit_enter,
1637	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1638	TP_ARGS(vcpu_id, ghcb),
1639
1640	TP_STRUCT__entry(
1641		__field(unsigned int, vcpu_id)
1642		__field(u64, exit_reason)
1643		__field(u64, info1)
1644		__field(u64, info2)
1645	),
1646
1647	TP_fast_assign(
1648		__entry->vcpu_id     = vcpu_id;
1649		__entry->exit_reason = ghcb->save.sw_exit_code;
1650		__entry->info1       = ghcb->save.sw_exit_info_1;
1651		__entry->info2       = ghcb->save.sw_exit_info_2;
1652	),
1653
1654	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1655		  __entry->vcpu_id, __entry->exit_reason,
1656		  __entry->info1, __entry->info2)
1657);
1658
1659/*
1660 * Tracepoint for the end of VMGEXIT processing
1661 */
1662TRACE_EVENT(kvm_vmgexit_exit,
1663	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1664	TP_ARGS(vcpu_id, ghcb),
1665
1666	TP_STRUCT__entry(
1667		__field(unsigned int, vcpu_id)
1668		__field(u64, exit_reason)
1669		__field(u64, info1)
1670		__field(u64, info2)
1671	),
1672
1673	TP_fast_assign(
1674		__entry->vcpu_id     = vcpu_id;
1675		__entry->exit_reason = ghcb->save.sw_exit_code;
1676		__entry->info1       = ghcb->save.sw_exit_info_1;
1677		__entry->info2       = ghcb->save.sw_exit_info_2;
1678	),
1679
1680	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1681		  __entry->vcpu_id, __entry->exit_reason,
1682		  __entry->info1, __entry->info2)
1683);
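
/*
 * Usage sketch (illustrative): the two VMGEXIT events take the vCPU id and
 * the guest's GHCB, from which the SW_EXITCODE and SW_EXITINFO1/2 fields are
 * captured, so a handler would typically bracket its work with them:
 *
 *	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 *	// ... process the requested exit on the guest's behalf ...
 *	trace_kvm_vmgexit_exit(vcpu->vcpu_id, ghcb);
 */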
1684
1685/*
1686 * Tracepoint for the start of VMGEXIT MSR protocol processing
1687 */
1688TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
1689	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
1690	TP_ARGS(vcpu_id, ghcb_gpa),
1691
1692	TP_STRUCT__entry(
1693		__field(unsigned int, vcpu_id)
1694		__field(u64, ghcb_gpa)
1695	),
1696
1697	TP_fast_assign(
1698		__entry->vcpu_id  = vcpu_id;
1699		__entry->ghcb_gpa = ghcb_gpa;
1700	),
1701
1702	TP_printk("vcpu %u, ghcb_gpa %016llx",
1703		  __entry->vcpu_id, __entry->ghcb_gpa)
1704);
1705
1706/*
1707 * Tracepoint for the end of VMGEXIT MSR protocol processing
1708 */
1709TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
1710	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
1711	TP_ARGS(vcpu_id, ghcb_gpa, result),
1712
1713	TP_STRUCT__entry(
1714		__field(unsigned int, vcpu_id)
1715		__field(u64, ghcb_gpa)
1716		__field(int, result)
1717	),
1718
1719	TP_fast_assign(
1720		__entry->vcpu_id  = vcpu_id;
1721		__entry->ghcb_gpa = ghcb_gpa;
1722		__entry->result   = result;
1723	),
1724
1725	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
1726		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
1727);
1728
1729#endif /* _TRACE_KVM_H */
1730
1731#undef TRACE_INCLUDE_PATH
1732#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1733#undef TRACE_INCLUDE_FILE
1734#define TRACE_INCLUDE_FILE trace
1735
1736/* This part must be outside protection */
1737#include <trace/define_trace.h>