v3.1
 
  1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  2#define _TRACE_KVM_H
  3
  4#include <linux/tracepoint.h>
  5
  6#undef TRACE_SYSTEM
  7#define TRACE_SYSTEM kvm
  8
  9/*
 10 * Tracepoint for guest mode entry.
 11 */
 12TRACE_EVENT(kvm_entry,
 13	TP_PROTO(unsigned int vcpu_id),
 14	TP_ARGS(vcpu_id),
 15
 16	TP_STRUCT__entry(
 17		__field(	unsigned int,	vcpu_id		)
 18	),
 19
 20	TP_fast_assign(
 21		__entry->vcpu_id	= vcpu_id;
 22	),
 23
 24	TP_printk("vcpu %u", __entry->vcpu_id)
 25);
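      /*
       * Usage sketch (illustrative, not quoted from the kernel sources):
       * TRACE_EVENT() expands into an inline trace_kvm_entry() helper that
       * the arch entry path calls, roughly:
       *
       *	trace_kvm_entry(vcpu->vcpu_id);
       *
       * The event can then be toggled from userspace, e.g.:
       *	echo 1 > /sys/kernel/debug/tracing/events/kvm/kvm_entry/enable
       */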
 26
 27/*
 28 * Tracepoint for hypercall.
 29 */
 30TRACE_EVENT(kvm_hypercall,
 31	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 32		 unsigned long a2, unsigned long a3),
 33	TP_ARGS(nr, a0, a1, a2, a3),
 34
 35	TP_STRUCT__entry(
 36		__field(	unsigned long, 	nr		)
 37		__field(	unsigned long,	a0		)
 38		__field(	unsigned long,	a1		)
 39		__field(	unsigned long,	a2		)
 40		__field(	unsigned long,	a3		)
 41	),
 42
 43	TP_fast_assign(
 44		__entry->nr		= nr;
 45		__entry->a0		= a0;
 46		__entry->a1		= a1;
 47		__entry->a2		= a2;
 48		__entry->a3		= a3;
 49	),
 50
 51	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
 52		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 53		 __entry->a3)
 54);
 55
 56/*
  57 * Tracepoint for Hyper-V hypercall.
 58 */
 59TRACE_EVENT(kvm_hv_hypercall,
 60	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
 61		 __u64 ingpa, __u64 outgpa),
 62	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 63
 64	TP_STRUCT__entry(
 65		__field(	__u16,		rep_cnt		)
 66		__field(	__u16,		rep_idx		)
 67		__field(	__u64,		ingpa		)
 68		__field(	__u64,		outgpa		)
 69		__field(	__u16, 		code		)
 70		__field(	bool,		fast		)
 71	),
 72
 73	TP_fast_assign(
 74		__entry->rep_cnt	= rep_cnt;
 75		__entry->rep_idx	= rep_idx;
 76		__entry->ingpa		= ingpa;
 77		__entry->outgpa		= outgpa;
 78		__entry->code		= code;
 79		__entry->fast		= fast;
 80	),
 81
 82	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
 83		  __entry->code, __entry->fast ? "fast" : "slow",
 84		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
 85		  __entry->outgpa)
 86);
 87
 88/*
 89 * Tracepoint for PIO.
 90 */
 91TRACE_EVENT(kvm_pio,
 92	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 93		 unsigned int count),
 94	TP_ARGS(rw, port, size, count),
 95
 96	TP_STRUCT__entry(
 97		__field(	unsigned int, 	rw		)
 98		__field(	unsigned int, 	port		)
 99		__field(	unsigned int, 	size		)
100		__field(	unsigned int,	count		)
101	),
102
103	TP_fast_assign(
104		__entry->rw		= rw;
105		__entry->port		= port;
106		__entry->size		= size;
107		__entry->count		= count;
108	),
109
110	TP_printk("pio_%s at 0x%x size %d count %d",
111		  __entry->rw ? "write" : "read",
112		  __entry->port, __entry->size, __entry->count)
113);
114
115/*
116 * Tracepoint for cpuid.
117 */
118TRACE_EVENT(kvm_cpuid,
119	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
120		 unsigned long rcx, unsigned long rdx),
121	TP_ARGS(function, rax, rbx, rcx, rdx),
122
123	TP_STRUCT__entry(
124		__field(	unsigned int,	function	)
125		__field(	unsigned long,	rax		)
126		__field(	unsigned long,	rbx		)
127		__field(	unsigned long,	rcx		)
128		__field(	unsigned long,	rdx		)
129	),
130
131	TP_fast_assign(
132		__entry->function	= function;
133		__entry->rax		= rax;
134		__entry->rbx		= rbx;
135		__entry->rcx		= rcx;
136		__entry->rdx		= rdx;
137	),
138
139	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
140		  __entry->function, __entry->rax,
141		  __entry->rbx, __entry->rcx, __entry->rdx)
142);
143
144#define AREG(x) { APIC_##x, "APIC_" #x }
145
146#define kvm_trace_symbol_apic						    \
147	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
148	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
149	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
150	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
151	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
152	AREG(ECTRL)
153/*
154 * Tracepoint for apic access.
155 */
156TRACE_EVENT(kvm_apic,
157	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
158	TP_ARGS(rw, reg, val),
159
160	TP_STRUCT__entry(
161		__field(	unsigned int,	rw		)
162		__field(	unsigned int,	reg		)
163		__field(	unsigned int,	val		)
164	),
165
166	TP_fast_assign(
167		__entry->rw		= rw;
168		__entry->reg		= reg;
169		__entry->val		= val;
170	),
171
172	TP_printk("apic_%s %s = 0x%x",
173		  __entry->rw ? "write" : "read",
174		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
175		  __entry->val)
176);
177
178#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
179#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
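      /*
       * Illustrative call site (assumed, for clarity): local APIC emulation
       * records accesses through the wrappers above, e.g.
       *
       *	trace_kvm_apic_write(APIC_ICR, val);
       *
       * At output time __print_symbolic() translates the numeric register
       * offset into its "APIC_ICR" name using the AREG() table, so the
       * record reads "apic_write APIC_ICR = 0x...".
       */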
180
181#define KVM_ISA_VMX   1
182#define KVM_ISA_SVM   2
183
184/*
185 * Tracepoint for kvm guest exit:
186 */
187TRACE_EVENT(kvm_exit,
188	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
189	TP_ARGS(exit_reason, vcpu, isa),
190
191	TP_STRUCT__entry(
192		__field(	unsigned int,	exit_reason	)
193		__field(	unsigned long,	guest_rip	)
194		__field(	u32,	        isa             )
195		__field(	u64,	        info1           )
196		__field(	u64,	        info2           )
197	),
198
199	TP_fast_assign(
200		__entry->exit_reason	= exit_reason;
201		__entry->guest_rip	= kvm_rip_read(vcpu);
202		__entry->isa            = isa;
203		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
204					   &__entry->info2);
205	),
206
207	TP_printk("reason %s rip 0x%lx info %llx %llx",
208		 ftrace_print_symbols_seq(p, __entry->exit_reason,
209					  kvm_x86_ops->exit_reasons_str),
210		 __entry->guest_rip, __entry->info1, __entry->info2)
211);
212
213/*
214 * Tracepoint for kvm interrupt injection:
215 */
216TRACE_EVENT(kvm_inj_virq,
217	TP_PROTO(unsigned int irq),
218	TP_ARGS(irq),
219
220	TP_STRUCT__entry(
221		__field(	unsigned int,	irq		)
222	),
223
224	TP_fast_assign(
225		__entry->irq		= irq;
226	),
227
228	TP_printk("irq %u", __entry->irq)
229);
230
231#define EXS(x) { x##_VECTOR, "#" #x }
232
233#define kvm_trace_sym_exc						\
234	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
235	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
236	EXS(MF), EXS(MC)
237
238/*
 239 * Tracepoint for kvm exception injection:
240 */
241TRACE_EVENT(kvm_inj_exception,
242	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
243	TP_ARGS(exception, has_error, error_code),
244
245	TP_STRUCT__entry(
246		__field(	u8,	exception	)
247		__field(	u8,	has_error	)
248		__field(	u32,	error_code	)
249	),
250
251	TP_fast_assign(
252		__entry->exception	= exception;
253		__entry->has_error	= has_error;
254		__entry->error_code	= error_code;
255	),
256
257	TP_printk("%s (0x%x)",
258		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
259		  /* FIXME: don't print error_code if not present */
260		  __entry->has_error ? __entry->error_code : 0)
261);
262
263/*
264 * Tracepoint for page fault.
265 */
266TRACE_EVENT(kvm_page_fault,
267	TP_PROTO(unsigned long fault_address, unsigned int error_code),
268	TP_ARGS(fault_address, error_code),
269
270	TP_STRUCT__entry(
271		__field(	unsigned long,	fault_address	)
272		__field(	unsigned int,	error_code	)
273	),
274
275	TP_fast_assign(
276		__entry->fault_address	= fault_address;
277		__entry->error_code	= error_code;
278	),
279
280	TP_printk("address %lx error_code %x",
281		  __entry->fault_address, __entry->error_code)
282);
283
284/*
285 * Tracepoint for guest MSR access.
286 */
287TRACE_EVENT(kvm_msr,
288	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
289	TP_ARGS(write, ecx, data, exception),
290
291	TP_STRUCT__entry(
292		__field(	unsigned,	write		)
293		__field(	u32,		ecx		)
294		__field(	u64,		data		)
295		__field(	u8,		exception	)
296	),
297
298	TP_fast_assign(
299		__entry->write		= write;
300		__entry->ecx		= ecx;
301		__entry->data		= data;
302		__entry->exception	= exception;
303	),
304
305	TP_printk("msr_%s %x = 0x%llx%s",
306		  __entry->write ? "write" : "read",
307		  __entry->ecx, __entry->data,
308		  __entry->exception ? " (#GP)" : "")
309);
310
311#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
312#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
313#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
314#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
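      /*
       * Illustrative use of the wrappers above (call sites assumed): a
       * successful RDMSR/WRMSR is logged with trace_kvm_msr_read()/
       * trace_kvm_msr_write(), while a faulting access uses the _ex
       * variants, e.g.
       *
       *	trace_kvm_msr_read_ex(ecx);
       *
       * which renders as "msr_read <ecx> = 0x0 (#GP)".
       */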
315
316/*
317 * Tracepoint for guest CR access.
318 */
319TRACE_EVENT(kvm_cr,
320	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
321	TP_ARGS(rw, cr, val),
322
323	TP_STRUCT__entry(
324		__field(	unsigned int,	rw		)
325		__field(	unsigned int,	cr		)
326		__field(	unsigned long,	val		)
327	),
328
329	TP_fast_assign(
330		__entry->rw		= rw;
331		__entry->cr		= cr;
332		__entry->val		= val;
333	),
334
335	TP_printk("cr_%s %x = 0x%lx",
336		  __entry->rw ? "write" : "read",
337		  __entry->cr, __entry->val)
338);
339
340#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
341#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
342
343TRACE_EVENT(kvm_pic_set_irq,
344	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
345	    TP_ARGS(chip, pin, elcr, imr, coalesced),
346
347	TP_STRUCT__entry(
348		__field(	__u8,		chip		)
349		__field(	__u8,		pin		)
350		__field(	__u8,		elcr		)
351		__field(	__u8,		imr		)
352		__field(	bool,		coalesced	)
353	),
354
355	TP_fast_assign(
356		__entry->chip		= chip;
357		__entry->pin		= pin;
358		__entry->elcr		= elcr;
359		__entry->imr		= imr;
360		__entry->coalesced	= coalesced;
361	),
362
363	TP_printk("chip %u pin %u (%s%s)%s",
364		  __entry->chip, __entry->pin,
365		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
366		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
367		  __entry->coalesced ? " (coalesced)" : "")
368);
369
370#define kvm_apic_dst_shorthand		\
371	{0x0, "dst"},			\
372	{0x1, "self"},			\
373	{0x2, "all"},			\
374	{0x3, "all-but-self"}
375
376TRACE_EVENT(kvm_apic_ipi,
377	    TP_PROTO(__u32 icr_low, __u32 dest_id),
378	    TP_ARGS(icr_low, dest_id),
379
380	TP_STRUCT__entry(
381		__field(	__u32,		icr_low		)
382		__field(	__u32,		dest_id		)
383	),
384
385	TP_fast_assign(
386		__entry->icr_low	= icr_low;
387		__entry->dest_id	= dest_id;
388	),
389
390	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
391		  __entry->dest_id, (u8)__entry->icr_low,
392		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
393				   kvm_deliver_mode),
394		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
395		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
396		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
397		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
398				   kvm_apic_dst_shorthand))
399);
400
401TRACE_EVENT(kvm_apic_accept_irq,
402	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
403	    TP_ARGS(apicid, dm, tm, vec, coalesced),
404
405	TP_STRUCT__entry(
406		__field(	__u32,		apicid		)
407		__field(	__u16,		dm		)
408		__field(	__u8,		tm		)
409		__field(	__u8,		vec		)
410		__field(	bool,		coalesced	)
411	),
412
413	TP_fast_assign(
414		__entry->apicid		= apicid;
415		__entry->dm		= dm;
416		__entry->tm		= tm;
417		__entry->vec		= vec;
418		__entry->coalesced	= coalesced;
419	),
420
421	TP_printk("apicid %x vec %u (%s|%s)%s",
422		  __entry->apicid, __entry->vec,
423		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
424		  __entry->tm ? "level" : "edge",
425		  __entry->coalesced ? " (coalesced)" : "")
426);
427
428/*
429 * Tracepoint for nested VMRUN
430 */
431TRACE_EVENT(kvm_nested_vmrun,
432	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
433		     __u32 event_inj, bool npt),
434	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
435
436	TP_STRUCT__entry(
437		__field(	__u64,		rip		)
438		__field(	__u64,		vmcb		)
439		__field(	__u64,		nested_rip	)
440		__field(	__u32,		int_ctl		)
441		__field(	__u32,		event_inj	)
442		__field(	bool,		npt		)
443	),
444
445	TP_fast_assign(
446		__entry->rip		= rip;
447		__entry->vmcb		= vmcb;
448		__entry->nested_rip	= nested_rip;
449		__entry->int_ctl	= int_ctl;
450		__entry->event_inj	= event_inj;
451		__entry->npt		= npt;
452	),
453
454	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
455		  "event_inj: 0x%08x npt: %s",
456		__entry->rip, __entry->vmcb, __entry->nested_rip,
457		__entry->int_ctl, __entry->event_inj,
458		__entry->npt ? "on" : "off")
459);
460
461TRACE_EVENT(kvm_nested_intercepts,
462	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
463	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
464
465	TP_STRUCT__entry(
466		__field(	__u16,		cr_read		)
467		__field(	__u16,		cr_write	)
468		__field(	__u32,		exceptions	)
469		__field(	__u64,		intercept	)
470	),
471
472	TP_fast_assign(
473		__entry->cr_read	= cr_read;
474		__entry->cr_write	= cr_write;
475		__entry->exceptions	= exceptions;
476		__entry->intercept	= intercept;
477	),
478
479	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
480		__entry->cr_read, __entry->cr_write, __entry->exceptions,
481		__entry->intercept)
482);
483/*
484 * Tracepoint for #VMEXIT while nested
485 */
486TRACE_EVENT(kvm_nested_vmexit,
487	    TP_PROTO(__u64 rip, __u32 exit_code,
488		     __u64 exit_info1, __u64 exit_info2,
489		     __u32 exit_int_info, __u32 exit_int_info_err),
490	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
491		    exit_int_info, exit_int_info_err),
492
493	TP_STRUCT__entry(
494		__field(	__u64,		rip			)
495		__field(	__u32,		exit_code		)
496		__field(	__u64,		exit_info1		)
497		__field(	__u64,		exit_info2		)
498		__field(	__u32,		exit_int_info		)
499		__field(	__u32,		exit_int_info_err	)
500	),
501
502	TP_fast_assign(
503		__entry->rip			= rip;
504		__entry->exit_code		= exit_code;
505		__entry->exit_info1		= exit_info1;
506		__entry->exit_info2		= exit_info2;
507		__entry->exit_int_info		= exit_int_info;
508		__entry->exit_int_info_err	= exit_int_info_err;
509	),
510	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
511		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
512		  __entry->rip,
513		  ftrace_print_symbols_seq(p, __entry->exit_code,
514					   kvm_x86_ops->exit_reasons_str),
515		  __entry->exit_info1, __entry->exit_info2,
516		  __entry->exit_int_info, __entry->exit_int_info_err)
517);
518
519/*
520 * Tracepoint for #VMEXIT reinjected to the guest
521 */
522TRACE_EVENT(kvm_nested_vmexit_inject,
523	    TP_PROTO(__u32 exit_code,
524		     __u64 exit_info1, __u64 exit_info2,
525		     __u32 exit_int_info, __u32 exit_int_info_err),
526	    TP_ARGS(exit_code, exit_info1, exit_info2,
527		    exit_int_info, exit_int_info_err),
528
529	TP_STRUCT__entry(
530		__field(	__u32,		exit_code		)
531		__field(	__u64,		exit_info1		)
532		__field(	__u64,		exit_info2		)
533		__field(	__u32,		exit_int_info		)
534		__field(	__u32,		exit_int_info_err	)
535	),
536
537	TP_fast_assign(
538		__entry->exit_code		= exit_code;
539		__entry->exit_info1		= exit_info1;
540		__entry->exit_info2		= exit_info2;
541		__entry->exit_int_info		= exit_int_info;
542		__entry->exit_int_info_err	= exit_int_info_err;
543	),
544
545	TP_printk("reason: %s ext_inf1: 0x%016llx "
546		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
547		  ftrace_print_symbols_seq(p, __entry->exit_code,
548					   kvm_x86_ops->exit_reasons_str),
549		__entry->exit_info1, __entry->exit_info2,
550		__entry->exit_int_info, __entry->exit_int_info_err)
551);
552
553/*
554 * Tracepoint for nested #vmexit because of interrupt pending
555 */
556TRACE_EVENT(kvm_nested_intr_vmexit,
557	    TP_PROTO(__u64 rip),
558	    TP_ARGS(rip),
559
560	TP_STRUCT__entry(
561		__field(	__u64,	rip	)
562	),
563
564	TP_fast_assign(
 565		__entry->rip	=	rip;
566	),
567
568	TP_printk("rip: 0x%016llx", __entry->rip)
569);
570
571/*
 572 * Tracepoint for INVLPGA instruction emulation
573 */
574TRACE_EVENT(kvm_invlpga,
575	    TP_PROTO(__u64 rip, int asid, u64 address),
576	    TP_ARGS(rip, asid, address),
577
578	TP_STRUCT__entry(
579		__field(	__u64,	rip	)
580		__field(	int,	asid	)
581		__field(	__u64,	address	)
582	),
583
584	TP_fast_assign(
585		__entry->rip		=	rip;
586		__entry->asid		=	asid;
587		__entry->address	=	address;
588	),
589
590	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
591		  __entry->rip, __entry->asid, __entry->address)
592);
593
594/*
 595 * Tracepoint for SKINIT instruction emulation
596 */
597TRACE_EVENT(kvm_skinit,
598	    TP_PROTO(__u64 rip, __u32 slb),
599	    TP_ARGS(rip, slb),
600
601	TP_STRUCT__entry(
602		__field(	__u64,	rip	)
603		__field(	__u32,	slb	)
604	),
605
606	TP_fast_assign(
607		__entry->rip		=	rip;
608		__entry->slb		=	slb;
609	),
610
611	TP_printk("rip: 0x%016llx slb: 0x%08x",
612		  __entry->rip, __entry->slb)
613);
614
615#define __print_insn(insn, ilen) ({		                 \
616	int i;							 \
617	const char *ret = p->buffer + p->len;			 \
618								 \
619	for (i = 0; i < ilen; ++i)				 \
620		trace_seq_printf(p, " %02x", insn[i]);		 \
621	trace_seq_printf(p, "%c", 0);				 \
622	ret;							 \
623	})
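      /*
       * Note on __print_insn() above: it builds on the trace_seq 'p' that
       * TP_printk() provides, appending each instruction byte as " %02x"
       * plus a terminating NUL and returning a pointer into the seq buffer,
       * so the bytes are formatted when the trace is read rather than
       * stored as text in the ring buffer. Later kernels drop this local
       * helper in favour of the generic __print_hex() (see the v5.4
       * listing below).
       */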
624
625#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
626#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
627#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
628#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
629
630#define kvm_trace_symbol_emul_flags	                  \
631	{ 0,   			    "real" },		  \
632	{ KVM_EMUL_INSN_F_CR0_PE			  \
633	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
634	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
635	{ KVM_EMUL_INSN_F_CR0_PE			  \
636	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
637	{ KVM_EMUL_INSN_F_CR0_PE			  \
638	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
639
640#define kei_decode_mode(mode) ({			\
641	u8 flags = 0xff;				\
642	switch (mode) {					\
643	case X86EMUL_MODE_REAL:				\
644		flags = 0;				\
645		break;					\
646	case X86EMUL_MODE_VM86:				\
647		flags = KVM_EMUL_INSN_F_EFL_VM;		\
648		break;					\
649	case X86EMUL_MODE_PROT16:			\
650		flags = KVM_EMUL_INSN_F_CR0_PE;		\
651		break;					\
652	case X86EMUL_MODE_PROT32:			\
653		flags = KVM_EMUL_INSN_F_CR0_PE		\
654			| KVM_EMUL_INSN_F_CS_D;		\
655		break;					\
656	case X86EMUL_MODE_PROT64:			\
657		flags = KVM_EMUL_INSN_F_CR0_PE		\
658			| KVM_EMUL_INSN_F_CS_L;		\
659		break;					\
660	}						\
661	flags;						\
662	})
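      /*
       * Example of the mapping above (for illustration): kei_decode_mode()
       * turns X86EMUL_MODE_PROT64 into KVM_EMUL_INSN_F_CR0_PE |
       * KVM_EMUL_INSN_F_CS_L, which __print_symbolic() then renders as
       * "prot64" via kvm_trace_symbol_emul_flags.
       */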
663
664TRACE_EVENT(kvm_emulate_insn,
665	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
666	TP_ARGS(vcpu, failed),
667
668	TP_STRUCT__entry(
669		__field(    __u64, rip                       )
670		__field(    __u32, csbase                    )
671		__field(    __u8,  len                       )
672		__array(    __u8,  insn,    15	             )
673		__field(    __u8,  flags       	   	     )
674		__field(    __u8,  failed                    )
675		),
676
677	TP_fast_assign(
678		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
679		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
680		__entry->len = vcpu->arch.emulate_ctxt._eip
681			       - vcpu->arch.emulate_ctxt.fetch.start;
682		memcpy(__entry->insn,
683		       vcpu->arch.emulate_ctxt.fetch.data,
684		       15);
685		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
686		__entry->failed = failed;
687		),
688
689	TP_printk("%x:%llx:%s (%s)%s",
690		  __entry->csbase, __entry->rip,
691		  __print_insn(__entry->insn, __entry->len),
692		  __print_symbolic(__entry->flags,
693				   kvm_trace_symbol_emul_flags),
694		  __entry->failed ? " failed" : ""
695		)
696	);
697
698#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
699#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
700
701TRACE_EVENT(
702	vcpu_match_mmio,
703	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
704	TP_ARGS(gva, gpa, write, gpa_match),
705
706	TP_STRUCT__entry(
707		__field(gva_t, gva)
708		__field(gpa_t, gpa)
709		__field(bool, write)
710		__field(bool, gpa_match)
711		),
712
713	TP_fast_assign(
714		__entry->gva = gva;
715		__entry->gpa = gpa;
716		__entry->write = write;
 717		__entry->gpa_match = gpa_match;
718		),
719
720	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
721		  __entry->write ? "Write" : "Read",
722		  __entry->gpa_match ? "GPA" : "GVA")
723);
724#endif /* _TRACE_KVM_H */
725
726#undef TRACE_INCLUDE_PATH
727#define TRACE_INCLUDE_PATH arch/x86/kvm
728#undef TRACE_INCLUDE_FILE
729#define TRACE_INCLUDE_FILE trace
730
731/* This part must be outside protection */
732#include <trace/define_trace.h>
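      /*
       * How this header is consumed (a sketch of the standard tracepoint
       * pattern, not specific to this file): any compilation unit may
       * include it for the trace_*() declarations, and exactly one .c file
       * defines CREATE_TRACE_POINTS first so that define_trace.h, guided by
       * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE above (and permitted to
       * re-read the header via TRACE_HEADER_MULTI_READ), emits the event
       * definitions:
       *
       *	#define CREATE_TRACE_POINTS
       *	#include "trace.h"
       */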
v5.4
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
   3#define _TRACE_KVM_H
   4
   5#include <linux/tracepoint.h>
   6#include <asm/vmx.h>
   7#include <asm/svm.h>
   8#include <asm/clocksource.h>
   9#include <asm/pvclock-abi.h>
  10
  11#undef TRACE_SYSTEM
  12#define TRACE_SYSTEM kvm
  13
  14/*
  15 * Tracepoint for guest mode entry.
  16 */
  17TRACE_EVENT(kvm_entry,
  18	TP_PROTO(unsigned int vcpu_id),
  19	TP_ARGS(vcpu_id),
  20
  21	TP_STRUCT__entry(
  22		__field(	unsigned int,	vcpu_id		)
  23	),
  24
  25	TP_fast_assign(
  26		__entry->vcpu_id	= vcpu_id;
  27	),
  28
  29	TP_printk("vcpu %u", __entry->vcpu_id)
  30);
  31
  32/*
  33 * Tracepoint for hypercall.
  34 */
  35TRACE_EVENT(kvm_hypercall,
  36	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
  37		 unsigned long a2, unsigned long a3),
  38	TP_ARGS(nr, a0, a1, a2, a3),
  39
  40	TP_STRUCT__entry(
  41		__field(	unsigned long, 	nr		)
  42		__field(	unsigned long,	a0		)
  43		__field(	unsigned long,	a1		)
  44		__field(	unsigned long,	a2		)
  45		__field(	unsigned long,	a3		)
  46	),
  47
  48	TP_fast_assign(
  49		__entry->nr		= nr;
  50		__entry->a0		= a0;
  51		__entry->a1		= a1;
  52		__entry->a2		= a2;
  53		__entry->a3		= a3;
  54	),
  55
  56	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
  57		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
  58		 __entry->a3)
  59);
  60
  61/*
   62 * Tracepoint for Hyper-V hypercall.
  63 */
  64TRACE_EVENT(kvm_hv_hypercall,
  65	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
  66		 __u64 ingpa, __u64 outgpa),
  67	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
  68
  69	TP_STRUCT__entry(
  70		__field(	__u16,		rep_cnt		)
  71		__field(	__u16,		rep_idx		)
  72		__field(	__u64,		ingpa		)
  73		__field(	__u64,		outgpa		)
  74		__field(	__u16, 		code		)
  75		__field(	bool,		fast		)
  76	),
  77
  78	TP_fast_assign(
  79		__entry->rep_cnt	= rep_cnt;
  80		__entry->rep_idx	= rep_idx;
  81		__entry->ingpa		= ingpa;
  82		__entry->outgpa		= outgpa;
  83		__entry->code		= code;
  84		__entry->fast		= fast;
  85	),
  86
  87	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
  88		  __entry->code, __entry->fast ? "fast" : "slow",
  89		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
  90		  __entry->outgpa)
  91);
  92
  93/*
  94 * Tracepoint for PIO.
  95 */
  96
  97#define KVM_PIO_IN   0
  98#define KVM_PIO_OUT  1
  99
 100TRACE_EVENT(kvm_pio,
 101	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 102		 unsigned int count, void *data),
 103	TP_ARGS(rw, port, size, count, data),
 104
 105	TP_STRUCT__entry(
 106		__field(	unsigned int, 	rw		)
 107		__field(	unsigned int, 	port		)
 108		__field(	unsigned int, 	size		)
 109		__field(	unsigned int,	count		)
 110		__field(	unsigned int,	val		)
 111	),
 112
 113	TP_fast_assign(
 114		__entry->rw		= rw;
 115		__entry->port		= port;
 116		__entry->size		= size;
 117		__entry->count		= count;
 118		if (size == 1)
 119			__entry->val	= *(unsigned char *)data;
 120		else if (size == 2)
 121			__entry->val	= *(unsigned short *)data;
 122		else
 123			__entry->val	= *(unsigned int *)data;
 124	),
 125
 126	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
 127		  __entry->rw ? "write" : "read",
 128		  __entry->port, __entry->size, __entry->count, __entry->val,
 129		  __entry->count > 1 ? "(...)" : "")
 130);
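      /*
       * Compared with the older version of this event, this variant also
       * snapshots the first transferred item through the 'data' pointer.
       * An illustrative record might read:
       *
       *	kvm_pio: pio_read at 0x1f7 size 1 count 1 val 0x50
       *
       * For count > 1 only the first value is captured, which the "(...)"
       * suffix indicates.
       */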
 131
 132/*
 133 * Tracepoint for fast mmio.
 134 */
 135TRACE_EVENT(kvm_fast_mmio,
 136	TP_PROTO(u64 gpa),
 137	TP_ARGS(gpa),
 138
 139	TP_STRUCT__entry(
 140		__field(u64,	gpa)
 141	),
 142
 143	TP_fast_assign(
 144		__entry->gpa		= gpa;
 145	),
 146
 147	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
 148);
 149
 150/*
 151 * Tracepoint for cpuid.
 152 */
 153TRACE_EVENT(kvm_cpuid,
 154	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
 155		 unsigned long rcx, unsigned long rdx, bool found),
 156	TP_ARGS(function, rax, rbx, rcx, rdx, found),
 157
 158	TP_STRUCT__entry(
 159		__field(	unsigned int,	function	)
 160		__field(	unsigned long,	rax		)
 161		__field(	unsigned long,	rbx		)
 162		__field(	unsigned long,	rcx		)
 163		__field(	unsigned long,	rdx		)
 164		__field(	bool,		found		)
 165	),
 166
 167	TP_fast_assign(
 168		__entry->function	= function;
 169		__entry->rax		= rax;
 170		__entry->rbx		= rbx;
 171		__entry->rcx		= rcx;
 172		__entry->rdx		= rdx;
 173		__entry->found		= found;
 174	),
 175
 176	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s",
 177		  __entry->function, __entry->rax,
 178		  __entry->rbx, __entry->rcx, __entry->rdx,
 179		  __entry->found ? "found" : "not found")
 180);
 181
 182#define AREG(x) { APIC_##x, "APIC_" #x }
 183
 184#define kvm_trace_symbol_apic						    \
 185	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
 186	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
 187	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
 188	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
 189	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
 190	AREG(ECTRL)
 191/*
 192 * Tracepoint for apic access.
 193 */
 194TRACE_EVENT(kvm_apic,
 195	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
 196	TP_ARGS(rw, reg, val),
 197
 198	TP_STRUCT__entry(
 199		__field(	unsigned int,	rw		)
 200		__field(	unsigned int,	reg		)
 201		__field(	unsigned int,	val		)
 202	),
 203
 204	TP_fast_assign(
 205		__entry->rw		= rw;
 206		__entry->reg		= reg;
 207		__entry->val		= val;
 208	),
 209
 210	TP_printk("apic_%s %s = 0x%x",
 211		  __entry->rw ? "write" : "read",
 212		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
 213		  __entry->val)
 214);
 215
 216#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 217#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 218
 219#define KVM_ISA_VMX   1
 220#define KVM_ISA_SVM   2
 221
 222/*
 223 * Tracepoint for kvm guest exit:
 224 */
 225TRACE_EVENT(kvm_exit,
 226	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
 227	TP_ARGS(exit_reason, vcpu, isa),
 228
 229	TP_STRUCT__entry(
 230		__field(	unsigned int,	exit_reason	)
 231		__field(	unsigned long,	guest_rip	)
 232		__field(	u32,	        isa             )
 233		__field(	u64,	        info1           )
 234		__field(	u64,	        info2           )
 235		__field(	unsigned int,	vcpu_id         )
 236	),
 237
 238	TP_fast_assign(
 239		__entry->exit_reason	= exit_reason;
 240		__entry->guest_rip	= kvm_rip_read(vcpu);
 241		__entry->isa            = isa;
 242		__entry->vcpu_id        = vcpu->vcpu_id;
 243		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
 244					   &__entry->info2);
 245	),
 246
 247	TP_printk("vcpu %u reason %s rip 0x%lx info %llx %llx",
 248		  __entry->vcpu_id,
 249		 (__entry->isa == KVM_ISA_VMX) ?
 250		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
 251		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
 252		 __entry->guest_rip, __entry->info1, __entry->info2)
 253);
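      /*
       * The 'isa' argument selects the symbol table at output time, so the
       * same event covers both vendors: the reason string is drawn from
       * VMX_EXIT_REASONS when isa == KVM_ISA_VMX and from SVM_EXIT_REASONS
       * otherwise. An illustrative record on Intel hardware might look like
       * (values are placeholders):
       *
       *	kvm_exit: vcpu 0 reason EPT_VIOLATION rip 0x... info ... ...
       */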
 254
 255/*
 256 * Tracepoint for kvm interrupt injection:
 257 */
 258TRACE_EVENT(kvm_inj_virq,
 259	TP_PROTO(unsigned int irq),
 260	TP_ARGS(irq),
 261
 262	TP_STRUCT__entry(
 263		__field(	unsigned int,	irq		)
 264	),
 265
 266	TP_fast_assign(
 267		__entry->irq		= irq;
 268	),
 269
 270	TP_printk("irq %u", __entry->irq)
 271);
 272
 273#define EXS(x) { x##_VECTOR, "#" #x }
 274
 275#define kvm_trace_sym_exc						\
 276	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
 277	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
 278	EXS(MF), EXS(AC), EXS(MC)
 279
 280/*
  281 * Tracepoint for kvm exception injection:
 282 */
 283TRACE_EVENT(kvm_inj_exception,
 284	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
 285	TP_ARGS(exception, has_error, error_code),
 286
 287	TP_STRUCT__entry(
 288		__field(	u8,	exception	)
 289		__field(	u8,	has_error	)
 290		__field(	u32,	error_code	)
 291	),
 292
 293	TP_fast_assign(
 294		__entry->exception	= exception;
 295		__entry->has_error	= has_error;
 296		__entry->error_code	= error_code;
 297	),
 298
 299	TP_printk("%s (0x%x)",
 300		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
 301		  /* FIXME: don't print error_code if not present */
 302		  __entry->has_error ? __entry->error_code : 0)
 303);
 304
 305/*
 306 * Tracepoint for page fault.
 307 */
 308TRACE_EVENT(kvm_page_fault,
 309	TP_PROTO(unsigned long fault_address, unsigned int error_code),
 310	TP_ARGS(fault_address, error_code),
 311
 312	TP_STRUCT__entry(
 313		__field(	unsigned long,	fault_address	)
 314		__field(	unsigned int,	error_code	)
 315	),
 316
 317	TP_fast_assign(
 318		__entry->fault_address	= fault_address;
 319		__entry->error_code	= error_code;
 320	),
 321
 322	TP_printk("address %lx error_code %x",
 323		  __entry->fault_address, __entry->error_code)
 324);
 325
 326/*
 327 * Tracepoint for guest MSR access.
 328 */
 329TRACE_EVENT(kvm_msr,
 330	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
 331	TP_ARGS(write, ecx, data, exception),
 332
 333	TP_STRUCT__entry(
 334		__field(	unsigned,	write		)
 335		__field(	u32,		ecx		)
 336		__field(	u64,		data		)
 337		__field(	u8,		exception	)
 338	),
 339
 340	TP_fast_assign(
 341		__entry->write		= write;
 342		__entry->ecx		= ecx;
 343		__entry->data		= data;
 344		__entry->exception	= exception;
 345	),
 346
 347	TP_printk("msr_%s %x = 0x%llx%s",
 348		  __entry->write ? "write" : "read",
 349		  __entry->ecx, __entry->data,
 350		  __entry->exception ? " (#GP)" : "")
 351);
 352
 353#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
 354#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
 355#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
 356#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
 357
 358/*
 359 * Tracepoint for guest CR access.
 360 */
 361TRACE_EVENT(kvm_cr,
 362	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
 363	TP_ARGS(rw, cr, val),
 364
 365	TP_STRUCT__entry(
 366		__field(	unsigned int,	rw		)
 367		__field(	unsigned int,	cr		)
 368		__field(	unsigned long,	val		)
 369	),
 370
 371	TP_fast_assign(
 372		__entry->rw		= rw;
 373		__entry->cr		= cr;
 374		__entry->val		= val;
 375	),
 376
 377	TP_printk("cr_%s %x = 0x%lx",
 378		  __entry->rw ? "write" : "read",
 379		  __entry->cr, __entry->val)
 380);
 381
 382#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
 383#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
 384
 385TRACE_EVENT(kvm_pic_set_irq,
 386	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
 387	    TP_ARGS(chip, pin, elcr, imr, coalesced),
 388
 389	TP_STRUCT__entry(
 390		__field(	__u8,		chip		)
 391		__field(	__u8,		pin		)
 392		__field(	__u8,		elcr		)
 393		__field(	__u8,		imr		)
 394		__field(	bool,		coalesced	)
 395	),
 396
 397	TP_fast_assign(
 398		__entry->chip		= chip;
 399		__entry->pin		= pin;
 400		__entry->elcr		= elcr;
 401		__entry->imr		= imr;
 402		__entry->coalesced	= coalesced;
 403	),
 404
 405	TP_printk("chip %u pin %u (%s%s)%s",
 406		  __entry->chip, __entry->pin,
 407		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
 408		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
 409		  __entry->coalesced ? " (coalesced)" : "")
 410);
 411
 412#define kvm_apic_dst_shorthand		\
 413	{0x0, "dst"},			\
 414	{0x1, "self"},			\
 415	{0x2, "all"},			\
 416	{0x3, "all-but-self"}
 417
 418TRACE_EVENT(kvm_apic_ipi,
 419	    TP_PROTO(__u32 icr_low, __u32 dest_id),
 420	    TP_ARGS(icr_low, dest_id),
 421
 422	TP_STRUCT__entry(
 423		__field(	__u32,		icr_low		)
 424		__field(	__u32,		dest_id		)
 425	),
 426
 427	TP_fast_assign(
 428		__entry->icr_low	= icr_low;
 429		__entry->dest_id	= dest_id;
 430	),
 431
 432	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
 433		  __entry->dest_id, (u8)__entry->icr_low,
 434		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
 435				   kvm_deliver_mode),
 436		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
 437		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
 438		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
 439		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
 440				   kvm_apic_dst_shorthand))
 441);
 442
 443TRACE_EVENT(kvm_apic_accept_irq,
 444	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 445	    TP_ARGS(apicid, dm, tm, vec),
 446
 447	TP_STRUCT__entry(
 448		__field(	__u32,		apicid		)
 449		__field(	__u16,		dm		)
 450		__field(	__u16,		tm		)
 451		__field(	__u8,		vec		)
 452	),
 453
 454	TP_fast_assign(
 455		__entry->apicid		= apicid;
 456		__entry->dm		= dm;
 457		__entry->tm		= tm;
 458		__entry->vec		= vec;
 459	),
 460
 461	TP_printk("apicid %x vec %u (%s|%s)",
 462		  __entry->apicid, __entry->vec,
 463		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
 464		  __entry->tm ? "level" : "edge")
 465);
 466
 467TRACE_EVENT(kvm_eoi,
 468	    TP_PROTO(struct kvm_lapic *apic, int vector),
 469	    TP_ARGS(apic, vector),
 470
 471	TP_STRUCT__entry(
 472		__field(	__u32,		apicid		)
 473		__field(	int,		vector		)
 474	),
 475
 476	TP_fast_assign(
 477		__entry->apicid		= apic->vcpu->vcpu_id;
 478		__entry->vector		= vector;
 479	),
 480
 481	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 482);
 483
 484TRACE_EVENT(kvm_pv_eoi,
 485	    TP_PROTO(struct kvm_lapic *apic, int vector),
 486	    TP_ARGS(apic, vector),
 487
 488	TP_STRUCT__entry(
 489		__field(	__u32,		apicid		)
 490		__field(	int,		vector		)
 491	),
 492
 493	TP_fast_assign(
 494		__entry->apicid		= apic->vcpu->vcpu_id;
 495		__entry->vector		= vector;
 496	),
 497
 498	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 499);
 500
 501/*
 502 * Tracepoint for nested VMRUN
 503 */
 504TRACE_EVENT(kvm_nested_vmrun,
 505	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
 506		     __u32 event_inj, bool npt),
 507	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
 508
 509	TP_STRUCT__entry(
 510		__field(	__u64,		rip		)
 511		__field(	__u64,		vmcb		)
 512		__field(	__u64,		nested_rip	)
 513		__field(	__u32,		int_ctl		)
 514		__field(	__u32,		event_inj	)
 515		__field(	bool,		npt		)
 516	),
 517
 518	TP_fast_assign(
 519		__entry->rip		= rip;
 520		__entry->vmcb		= vmcb;
 521		__entry->nested_rip	= nested_rip;
 522		__entry->int_ctl	= int_ctl;
 523		__entry->event_inj	= event_inj;
 524		__entry->npt		= npt;
 525	),
 526
 527	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
 528		  "event_inj: 0x%08x npt: %s",
 529		__entry->rip, __entry->vmcb, __entry->nested_rip,
 530		__entry->int_ctl, __entry->event_inj,
 531		__entry->npt ? "on" : "off")
 532);
 533
 534TRACE_EVENT(kvm_nested_intercepts,
 535	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
 536	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
 537
 538	TP_STRUCT__entry(
 539		__field(	__u16,		cr_read		)
 540		__field(	__u16,		cr_write	)
 541		__field(	__u32,		exceptions	)
 542		__field(	__u64,		intercept	)
 543	),
 544
 545	TP_fast_assign(
 546		__entry->cr_read	= cr_read;
 547		__entry->cr_write	= cr_write;
 548		__entry->exceptions	= exceptions;
 549		__entry->intercept	= intercept;
 550	),
 551
 552	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
 553		__entry->cr_read, __entry->cr_write, __entry->exceptions,
 554		__entry->intercept)
 555);
 556/*
 557 * Tracepoint for #VMEXIT while nested
 558 */
 559TRACE_EVENT(kvm_nested_vmexit,
 560	    TP_PROTO(__u64 rip, __u32 exit_code,
 561		     __u64 exit_info1, __u64 exit_info2,
 562		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
 563	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
 564		    exit_int_info, exit_int_info_err, isa),
 565
 566	TP_STRUCT__entry(
 567		__field(	__u64,		rip			)
 568		__field(	__u32,		exit_code		)
 569		__field(	__u64,		exit_info1		)
 570		__field(	__u64,		exit_info2		)
 571		__field(	__u32,		exit_int_info		)
 572		__field(	__u32,		exit_int_info_err	)
 573		__field(	__u32,		isa			)
 574	),
 575
 576	TP_fast_assign(
 577		__entry->rip			= rip;
 578		__entry->exit_code		= exit_code;
 579		__entry->exit_info1		= exit_info1;
 580		__entry->exit_info2		= exit_info2;
 581		__entry->exit_int_info		= exit_int_info;
 582		__entry->exit_int_info_err	= exit_int_info_err;
 583		__entry->isa			= isa;
 584	),
 585	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
 586		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
 587		  __entry->rip,
 588		 (__entry->isa == KVM_ISA_VMX) ?
 589		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
 590		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
 591		  __entry->exit_info1, __entry->exit_info2,
 592		  __entry->exit_int_info, __entry->exit_int_info_err)
 593);
 594
 595/*
 596 * Tracepoint for #VMEXIT reinjected to the guest
 597 */
 598TRACE_EVENT(kvm_nested_vmexit_inject,
 599	    TP_PROTO(__u32 exit_code,
 600		     __u64 exit_info1, __u64 exit_info2,
 601		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
 602	    TP_ARGS(exit_code, exit_info1, exit_info2,
 603		    exit_int_info, exit_int_info_err, isa),
 604
 605	TP_STRUCT__entry(
 606		__field(	__u32,		exit_code		)
 607		__field(	__u64,		exit_info1		)
 608		__field(	__u64,		exit_info2		)
 609		__field(	__u32,		exit_int_info		)
 610		__field(	__u32,		exit_int_info_err	)
 611		__field(	__u32,		isa			)
 612	),
 613
 614	TP_fast_assign(
 615		__entry->exit_code		= exit_code;
 616		__entry->exit_info1		= exit_info1;
 617		__entry->exit_info2		= exit_info2;
 618		__entry->exit_int_info		= exit_int_info;
 619		__entry->exit_int_info_err	= exit_int_info_err;
 620		__entry->isa			= isa;
 621	),
 622
 623	TP_printk("reason: %s ext_inf1: 0x%016llx "
 624		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
 625		 (__entry->isa == KVM_ISA_VMX) ?
 626		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
 627		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
 628		__entry->exit_info1, __entry->exit_info2,
 629		__entry->exit_int_info, __entry->exit_int_info_err)
 630);
 631
 632/*
 633 * Tracepoint for nested #vmexit because of interrupt pending
 634 */
 635TRACE_EVENT(kvm_nested_intr_vmexit,
 636	    TP_PROTO(__u64 rip),
 637	    TP_ARGS(rip),
 638
 639	TP_STRUCT__entry(
 640		__field(	__u64,	rip	)
 641	),
 642
 643	TP_fast_assign(
  644		__entry->rip	=	rip;
 645	),
 646
 647	TP_printk("rip: 0x%016llx", __entry->rip)
 648);
 649
 650/*
  651 * Tracepoint for INVLPGA instruction emulation
 652 */
 653TRACE_EVENT(kvm_invlpga,
 654	    TP_PROTO(__u64 rip, int asid, u64 address),
 655	    TP_ARGS(rip, asid, address),
 656
 657	TP_STRUCT__entry(
 658		__field(	__u64,	rip	)
 659		__field(	int,	asid	)
 660		__field(	__u64,	address	)
 661	),
 662
 663	TP_fast_assign(
 664		__entry->rip		=	rip;
 665		__entry->asid		=	asid;
 666		__entry->address	=	address;
 667	),
 668
 669	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
 670		  __entry->rip, __entry->asid, __entry->address)
 671);
 672
 673/*
  674 * Tracepoint for SKINIT instruction emulation
 675 */
 676TRACE_EVENT(kvm_skinit,
 677	    TP_PROTO(__u64 rip, __u32 slb),
 678	    TP_ARGS(rip, slb),
 679
 680	TP_STRUCT__entry(
 681		__field(	__u64,	rip	)
 682		__field(	__u32,	slb	)
 683	),
 684
 685	TP_fast_assign(
 686		__entry->rip		=	rip;
 687		__entry->slb		=	slb;
 688	),
 689
 690	TP_printk("rip: 0x%016llx slb: 0x%08x",
 691		  __entry->rip, __entry->slb)
 692);
 693
 694#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
 695#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
 696#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
 697#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
 698
 699#define kvm_trace_symbol_emul_flags	                  \
 700	{ 0,   			    "real" },		  \
 701	{ KVM_EMUL_INSN_F_CR0_PE			  \
 702	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
 703	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
 704	{ KVM_EMUL_INSN_F_CR0_PE			  \
 705	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
 706	{ KVM_EMUL_INSN_F_CR0_PE			  \
 707	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
 708
 709#define kei_decode_mode(mode) ({			\
 710	u8 flags = 0xff;				\
 711	switch (mode) {					\
 712	case X86EMUL_MODE_REAL:				\
 713		flags = 0;				\
 714		break;					\
 715	case X86EMUL_MODE_VM86:				\
 716		flags = KVM_EMUL_INSN_F_EFL_VM;		\
 717		break;					\
 718	case X86EMUL_MODE_PROT16:			\
 719		flags = KVM_EMUL_INSN_F_CR0_PE;		\
 720		break;					\
 721	case X86EMUL_MODE_PROT32:			\
 722		flags = KVM_EMUL_INSN_F_CR0_PE		\
 723			| KVM_EMUL_INSN_F_CS_D;		\
 724		break;					\
 725	case X86EMUL_MODE_PROT64:			\
 726		flags = KVM_EMUL_INSN_F_CR0_PE		\
 727			| KVM_EMUL_INSN_F_CS_L;		\
 728		break;					\
 729	}						\
 730	flags;						\
 731	})
 732
 733TRACE_EVENT(kvm_emulate_insn,
 734	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
 735	TP_ARGS(vcpu, failed),
 736
 737	TP_STRUCT__entry(
 738		__field(    __u64, rip                       )
 739		__field(    __u32, csbase                    )
 740		__field(    __u8,  len                       )
 741		__array(    __u8,  insn,    15	             )
 742		__field(    __u8,  flags       	   	     )
 743		__field(    __u8,  failed                    )
 744		),
 745
 746	TP_fast_assign(
 747		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
 748		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
 749			       - vcpu->arch.emulate_ctxt.fetch.data;
 750		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
 751		memcpy(__entry->insn,
 752		       vcpu->arch.emulate_ctxt.fetch.data,
 753		       15);
 754		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
 755		__entry->failed = failed;
 756		),
 757
 758	TP_printk("%x:%llx:%s (%s)%s",
 759		  __entry->csbase, __entry->rip,
 760		  __print_hex(__entry->insn, __entry->len),
 761		  __print_symbolic(__entry->flags,
 762				   kvm_trace_symbol_emul_flags),
 763		  __entry->failed ? " failed" : ""
 764		)
 765	);
 766
 767#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
 768#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
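      /*
       * Illustrative wrapper usage (call sites assumed): the emulator start
       * and failure paths log through the two macros above, e.g.
       *
       *	trace_kvm_emulate_insn_start(vcpu);
       *	...
       *	trace_kvm_emulate_insn_failed(vcpu);
       *
       * so a failed decode is rendered as "csbase:rip:<hex bytes> (prot64) failed".
       */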
 769
 770TRACE_EVENT(
 771	vcpu_match_mmio,
 772	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
 773	TP_ARGS(gva, gpa, write, gpa_match),
 774
 775	TP_STRUCT__entry(
 776		__field(gva_t, gva)
 777		__field(gpa_t, gpa)
 778		__field(bool, write)
 779		__field(bool, gpa_match)
 780		),
 781
 782	TP_fast_assign(
 783		__entry->gva = gva;
 784		__entry->gpa = gpa;
 785		__entry->write = write;
  786		__entry->gpa_match = gpa_match;
 787		),
 788
 789	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
 790		  __entry->write ? "Write" : "Read",
 791		  __entry->gpa_match ? "GPA" : "GVA")
 792);
 793
 794TRACE_EVENT(kvm_write_tsc_offset,
 795	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
 796		 __u64 next_tsc_offset),
 797	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
 798
 799	TP_STRUCT__entry(
 800		__field( unsigned int,	vcpu_id				)
 801		__field(	__u64,	previous_tsc_offset		)
 802		__field(	__u64,	next_tsc_offset			)
 803	),
 804
 805	TP_fast_assign(
 806		__entry->vcpu_id		= vcpu_id;
 807		__entry->previous_tsc_offset	= previous_tsc_offset;
 808		__entry->next_tsc_offset	= next_tsc_offset;
 809	),
 810
 811	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
 812		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
 813);
 814
 815#ifdef CONFIG_X86_64
 816
 817#define host_clocks					\
 818	{VCLOCK_NONE, "none"},				\
 819	{VCLOCK_TSC,  "tsc"}				\
 820
 821TRACE_EVENT(kvm_update_master_clock,
 822	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
 823	TP_ARGS(use_master_clock, host_clock, offset_matched),
 824
 825	TP_STRUCT__entry(
 826		__field(		bool,	use_master_clock	)
 827		__field(	unsigned int,	host_clock		)
 828		__field(		bool,	offset_matched		)
 829	),
 830
 831	TP_fast_assign(
 832		__entry->use_master_clock	= use_master_clock;
 833		__entry->host_clock		= host_clock;
 834		__entry->offset_matched		= offset_matched;
 835	),
 836
 837	TP_printk("masterclock %d hostclock %s offsetmatched %u",
 838		  __entry->use_master_clock,
 839		  __print_symbolic(__entry->host_clock, host_clocks),
 840		  __entry->offset_matched)
 841);
 842
 843TRACE_EVENT(kvm_track_tsc,
 844	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
 845		 unsigned int online_vcpus, bool use_master_clock,
 846		 unsigned int host_clock),
 847	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
 848		host_clock),
 849
 850	TP_STRUCT__entry(
 851		__field(	unsigned int,	vcpu_id			)
 852		__field(	unsigned int,	nr_vcpus_matched_tsc	)
 853		__field(	unsigned int,	online_vcpus		)
 854		__field(	bool,		use_master_clock	)
 855		__field(	unsigned int,	host_clock		)
 856	),
 857
 858	TP_fast_assign(
 859		__entry->vcpu_id		= vcpu_id;
 860		__entry->nr_vcpus_matched_tsc	= nr_matched;
 861		__entry->online_vcpus		= online_vcpus;
 862		__entry->use_master_clock	= use_master_clock;
 863		__entry->host_clock		= host_clock;
 864	),
 865
 866	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
 867		  " hostclock %s",
 868		  __entry->vcpu_id, __entry->use_master_clock,
 869		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
 870		  __print_symbolic(__entry->host_clock, host_clocks))
 871);
 872
 873#endif /* CONFIG_X86_64 */
 874
 875/*
 876 * Tracepoint for PML full VMEXIT.
 877 */
 878TRACE_EVENT(kvm_pml_full,
 879	TP_PROTO(unsigned int vcpu_id),
 880	TP_ARGS(vcpu_id),
 881
 882	TP_STRUCT__entry(
 883		__field(	unsigned int,	vcpu_id			)
 884	),
 885
 886	TP_fast_assign(
 887		__entry->vcpu_id		= vcpu_id;
 888	),
 889
 890	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
 891);
 892
 893TRACE_EVENT(kvm_ple_window_update,
 894	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
 895	TP_ARGS(vcpu_id, new, old),
 896
 897	TP_STRUCT__entry(
 898		__field(        unsigned int,   vcpu_id         )
 899		__field(        unsigned int,       new         )
 900		__field(        unsigned int,       old         )
 901	),
 902
 903	TP_fast_assign(
 904		__entry->vcpu_id        = vcpu_id;
 905		__entry->new            = new;
 906		__entry->old            = old;
 907	),
 908
 909	TP_printk("vcpu %u old %u new %u (%s)",
 910	          __entry->vcpu_id, __entry->old, __entry->new,
  911		  __entry->old < __entry->new ? "grew" : "shrunk")
 912);
 913
 914TRACE_EVENT(kvm_pvclock_update,
 915	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
 916	TP_ARGS(vcpu_id, pvclock),
 917
 918	TP_STRUCT__entry(
 919		__field(	unsigned int,	vcpu_id			)
 920		__field(	__u32,		version			)
 921		__field(	__u64,		tsc_timestamp		)
 922		__field(	__u64,		system_time		)
 923		__field(	__u32,		tsc_to_system_mul	)
 924		__field(	__s8,		tsc_shift		)
 925		__field(	__u8,		flags			)
 926	),
 927
 928	TP_fast_assign(
 929		__entry->vcpu_id	   = vcpu_id;
 930		__entry->version	   = pvclock->version;
 931		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
 932		__entry->system_time	   = pvclock->system_time;
 933		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
 934		__entry->tsc_shift	   = pvclock->tsc_shift;
 935		__entry->flags		   = pvclock->flags;
 936	),
 937
 938	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
 939		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
 940		  "flags 0x%x }",
 941		  __entry->vcpu_id,
 942		  __entry->version,
 943		  __entry->tsc_timestamp,
 944		  __entry->system_time,
 945		  __entry->tsc_to_system_mul,
 946		  __entry->tsc_shift,
 947		  __entry->flags)
 948);
 949
 950TRACE_EVENT(kvm_wait_lapic_expire,
 951	TP_PROTO(unsigned int vcpu_id, s64 delta),
 952	TP_ARGS(vcpu_id, delta),
 953
 954	TP_STRUCT__entry(
 955		__field(	unsigned int,	vcpu_id		)
 956		__field(	s64,		delta		)
 957	),
 958
 959	TP_fast_assign(
 960		__entry->vcpu_id	   = vcpu_id;
 961		__entry->delta             = delta;
 962	),
 963
 964	TP_printk("vcpu %u: delta %lld (%s)",
 965		  __entry->vcpu_id,
 966		  __entry->delta,
 967		  __entry->delta < 0 ? "early" : "late")
 968);
 969
 970TRACE_EVENT(kvm_enter_smm,
 971	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
 972	TP_ARGS(vcpu_id, smbase, entering),
 973
 974	TP_STRUCT__entry(
 975		__field(	unsigned int,	vcpu_id		)
 976		__field(	u64,		smbase		)
 977		__field(	bool,		entering	)
 978	),
 979
 980	TP_fast_assign(
 981		__entry->vcpu_id	= vcpu_id;
 982		__entry->smbase		= smbase;
 983		__entry->entering	= entering;
 984	),
 985
 986	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
 987		  __entry->vcpu_id,
 988		  __entry->entering ? "entering" : "leaving",
 989		  __entry->smbase)
 990);
 991
 992/*
 993 * Tracepoint for VT-d posted-interrupts.
 994 */
 995TRACE_EVENT(kvm_pi_irte_update,
 996	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
 997		 unsigned int gsi, unsigned int gvec,
 998		 u64 pi_desc_addr, bool set),
 999	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
1000
1001	TP_STRUCT__entry(
1002		__field(	unsigned int,	host_irq	)
1003		__field(	unsigned int,	vcpu_id		)
1004		__field(	unsigned int,	gsi		)
1005		__field(	unsigned int,	gvec		)
1006		__field(	u64,		pi_desc_addr	)
1007		__field(	bool,		set		)
1008	),
1009
1010	TP_fast_assign(
1011		__entry->host_irq	= host_irq;
1012		__entry->vcpu_id	= vcpu_id;
1013		__entry->gsi		= gsi;
1014		__entry->gvec		= gvec;
1015		__entry->pi_desc_addr	= pi_desc_addr;
1016		__entry->set		= set;
1017	),
1018
1019	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
1020		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
1021		  __entry->set ? "enabled and being updated" : "disabled",
1022		  __entry->host_irq,
1023		  __entry->vcpu_id,
1024		  __entry->gsi,
1025		  __entry->gvec,
1026		  __entry->pi_desc_addr)
1027);
1028
1029/*
1030 * Tracepoint for kvm_hv_notify_acked_sint.
1031 */
1032TRACE_EVENT(kvm_hv_notify_acked_sint,
1033	TP_PROTO(int vcpu_id, u32 sint),
1034	TP_ARGS(vcpu_id, sint),
1035
1036	TP_STRUCT__entry(
1037		__field(int, vcpu_id)
1038		__field(u32, sint)
1039	),
1040
1041	TP_fast_assign(
1042		__entry->vcpu_id = vcpu_id;
1043		__entry->sint = sint;
1044	),
1045
1046	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
1047);
1048
1049/*
1050 * Tracepoint for synic_set_irq.
1051 */
1052TRACE_EVENT(kvm_hv_synic_set_irq,
1053	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
1054	TP_ARGS(vcpu_id, sint, vector, ret),
1055
1056	TP_STRUCT__entry(
1057		__field(int, vcpu_id)
1058		__field(u32, sint)
1059		__field(int, vector)
1060		__field(int, ret)
1061	),
1062
1063	TP_fast_assign(
1064		__entry->vcpu_id = vcpu_id;
1065		__entry->sint = sint;
1066		__entry->vector = vector;
1067		__entry->ret = ret;
1068	),
1069
1070	TP_printk("vcpu_id %d sint %u vector %d ret %d",
1071		  __entry->vcpu_id, __entry->sint, __entry->vector,
1072		  __entry->ret)
1073);
1074
1075/*
1076 * Tracepoint for kvm_hv_synic_send_eoi.
1077 */
1078TRACE_EVENT(kvm_hv_synic_send_eoi,
1079	TP_PROTO(int vcpu_id, int vector),
1080	TP_ARGS(vcpu_id, vector),
1081
1082	TP_STRUCT__entry(
1083		__field(int, vcpu_id)
1084		__field(u32, sint)
1085		__field(int, vector)
1086		__field(int, ret)
1087	),
1088
1089	TP_fast_assign(
1090		__entry->vcpu_id = vcpu_id;
1091		__entry->vector	= vector;
1092	),
1093
1094	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
1095);
1096
1097/*
1098 * Tracepoint for synic_set_msr.
1099 */
1100TRACE_EVENT(kvm_hv_synic_set_msr,
1101	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
1102	TP_ARGS(vcpu_id, msr, data, host),
1103
1104	TP_STRUCT__entry(
1105		__field(int, vcpu_id)
1106		__field(u32, msr)
1107		__field(u64, data)
1108		__field(bool, host)
1109	),
1110
1111	TP_fast_assign(
1112		__entry->vcpu_id = vcpu_id;
1113		__entry->msr = msr;
1114		__entry->data = data;
 1115		__entry->host = host;
1116	),
1117
1118	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
1119		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
1120);
1121
1122/*
1123 * Tracepoint for stimer_set_config.
1124 */
1125TRACE_EVENT(kvm_hv_stimer_set_config,
1126	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
1127	TP_ARGS(vcpu_id, timer_index, config, host),
1128
1129	TP_STRUCT__entry(
1130		__field(int, vcpu_id)
1131		__field(int, timer_index)
1132		__field(u64, config)
1133		__field(bool, host)
1134	),
1135
1136	TP_fast_assign(
1137		__entry->vcpu_id = vcpu_id;
1138		__entry->timer_index = timer_index;
1139		__entry->config = config;
1140		__entry->host = host;
1141	),
1142
1143	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
1144		  __entry->vcpu_id, __entry->timer_index, __entry->config,
1145		  __entry->host)
1146);
1147
1148/*
1149 * Tracepoint for stimer_set_count.
1150 */
1151TRACE_EVENT(kvm_hv_stimer_set_count,
1152	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
1153	TP_ARGS(vcpu_id, timer_index, count, host),
1154
1155	TP_STRUCT__entry(
1156		__field(int, vcpu_id)
1157		__field(int, timer_index)
1158		__field(u64, count)
1159		__field(bool, host)
1160	),
1161
1162	TP_fast_assign(
1163		__entry->vcpu_id = vcpu_id;
1164		__entry->timer_index = timer_index;
1165		__entry->count = count;
1166		__entry->host = host;
1167	),
1168
1169	TP_printk("vcpu_id %d timer %d count %llu host %d",
1170		  __entry->vcpu_id, __entry->timer_index, __entry->count,
1171		  __entry->host)
1172);
1173
1174/*
1175 * Tracepoint for stimer_start(periodic timer case).
1176 */
1177TRACE_EVENT(kvm_hv_stimer_start_periodic,
1178	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
1179	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),
1180
1181	TP_STRUCT__entry(
1182		__field(int, vcpu_id)
1183		__field(int, timer_index)
1184		__field(u64, time_now)
1185		__field(u64, exp_time)
1186	),
1187
1188	TP_fast_assign(
1189		__entry->vcpu_id = vcpu_id;
1190		__entry->timer_index = timer_index;
1191		__entry->time_now = time_now;
1192		__entry->exp_time = exp_time;
1193	),
1194
1195	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
1196		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1197		  __entry->exp_time)
1198);
1199
1200/*
1201 * Tracepoint for stimer_start(one-shot timer case).
1202 */
1203TRACE_EVENT(kvm_hv_stimer_start_one_shot,
1204	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
1205	TP_ARGS(vcpu_id, timer_index, time_now, count),
1206
1207	TP_STRUCT__entry(
1208		__field(int, vcpu_id)
1209		__field(int, timer_index)
1210		__field(u64, time_now)
1211		__field(u64, count)
1212	),
1213
1214	TP_fast_assign(
1215		__entry->vcpu_id = vcpu_id;
1216		__entry->timer_index = timer_index;
1217		__entry->time_now = time_now;
1218		__entry->count = count;
1219	),
1220
1221	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
1222		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1223		  __entry->count)
1224);
1225
1226/*
1227 * Tracepoint for stimer_timer_callback.
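 * Fires from the hrtimer callback that backs an emulated synthetic timer.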
1228 */
1229TRACE_EVENT(kvm_hv_stimer_callback,
1230	TP_PROTO(int vcpu_id, int timer_index),
1231	TP_ARGS(vcpu_id, timer_index),
1232
1233	TP_STRUCT__entry(
1234		__field(int, vcpu_id)
1235		__field(int, timer_index)
1236	),
1237
1238	TP_fast_assign(
1239		__entry->vcpu_id = vcpu_id;
1240		__entry->timer_index = timer_index;
1241	),
1242
1243	TP_printk("vcpu_id %d timer %d",
1244		  __entry->vcpu_id, __entry->timer_index)
1245);
1246
1247/*
1248 * Tracepoint for stimer_expiration.
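 * 'direct' indicates direct-mode delivery (the interrupt is injected
 * directly instead of posting a SynIC message); msg_send_result is the
 * outcome of the delivery attempt.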
1249 */
1250TRACE_EVENT(kvm_hv_stimer_expiration,
1251	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
1252	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
1253
1254	TP_STRUCT__entry(
1255		__field(int, vcpu_id)
1256		__field(int, timer_index)
1257		__field(int, direct)
1258		__field(int, msg_send_result)
1259	),
1260
1261	TP_fast_assign(
1262		__entry->vcpu_id = vcpu_id;
1263		__entry->timer_index = timer_index;
1264		__entry->direct = direct;
1265		__entry->msg_send_result = msg_send_result;
1266	),
1267
1268	TP_printk("vcpu_id %d timer %d direct %d send result %d",
1269		  __entry->vcpu_id, __entry->timer_index,
1270		  __entry->direct, __entry->msg_send_result)
1271);
1272
1273/*
1274 * Tracepoint for stimer_cleanup.
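 * Fires when an armed synthetic timer is stopped and its pending
 * expiration state is cleared.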
1275 */
1276TRACE_EVENT(kvm_hv_stimer_cleanup,
1277	TP_PROTO(int vcpu_id, int timer_index),
1278	TP_ARGS(vcpu_id, timer_index),
1279
1280	TP_STRUCT__entry(
1281		__field(int, vcpu_id)
1282		__field(int, timer_index)
1283	),
1284
1285	TP_fast_assign(
1286		__entry->vcpu_id = vcpu_id;
1287		__entry->timer_index = timer_index;
1288	),
1289
1290	TP_printk("vcpu_id %d timer %d",
1291		  __entry->vcpu_id, __entry->timer_index)
1292);
1293
1294/*
1295 * Tracepoint for AMD AVIC.
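 * kvm_avic_incomplete_ipi fires on an AVIC_INCOMPLETE_IPI #VMEXIT, taken
 * when hardware cannot complete virtual IPI delivery (e.g. a target vCPU is
 * not running) and KVM must finish the delivery in software; icrh/icrl are
 * the two halves of the ICR being written.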
1296 */
1297TRACE_EVENT(kvm_avic_incomplete_ipi,
1298	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
1299	    TP_ARGS(vcpu, icrh, icrl, id, index),
1300
1301	TP_STRUCT__entry(
1302		__field(u32, vcpu)
1303		__field(u32, icrh)
1304		__field(u32, icrl)
1305		__field(u32, id)
1306		__field(u32, index)
1307	),
1308
1309	TP_fast_assign(
1310		__entry->vcpu = vcpu;
1311		__entry->icrh = icrh;
1312		__entry->icrl = icrl;
1313		__entry->id = id;
1314		__entry->index = index;
1315	),
1316
1317	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
1318		  __entry->vcpu, __entry->icrh, __entry->icrl,
1319		  __entry->id, __entry->index)
1320);
1321
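/*
 * Tracepoint for AVIC unaccelerated access: an APIC register access that
 * hardware could not virtualize, reported either as a trap (after the
 * write has completed) or as a fault (before the access), per the 'ft'
 * flag.
 */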
1322TRACE_EVENT(kvm_avic_unaccelerated_access,
1323	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
1324	    TP_ARGS(vcpu, offset, ft, rw, vec),
1325
1326	TP_STRUCT__entry(
1327		__field(u32, vcpu)
1328		__field(u32, offset)
1329		__field(bool, ft)
1330		__field(bool, rw)
1331		__field(u32, vec)
1332	),
1333
1334	TP_fast_assign(
1335		__entry->vcpu = vcpu;
1336		__entry->offset = offset;
1337		__entry->ft = ft;
1338		__entry->rw = rw;
1339		__entry->vec = vec;
1340	),
1341
1342	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
1343		  __entry->vcpu,
1344		  __entry->offset,
1345		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
1346		  __entry->ft ? "trap" : "fault",
1347		  __entry->rw ? "write" : "read",
1348		  __entry->vec)
1349);
1350
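/*
 * Tracepoint for switching the emulated APIC timer between the software
 * hrtimer path and the hardware timer path (the VMX preemption timer).
 */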
1351TRACE_EVENT(kvm_hv_timer_state,
1352	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
1353	TP_ARGS(vcpu_id, hv_timer_in_use),
1354
1355	TP_STRUCT__entry(
1356		__field(unsigned int, vcpu_id)
1357		__field(unsigned int, hv_timer_in_use)
1358	),
1359
1360	TP_fast_assign(
1361		__entry->vcpu_id = vcpu_id;
1362		__entry->hv_timer_in_use = hv_timer_in_use;
1363	),
1364	TP_printk("vcpu_id %x hv_timer %x",
1365		  __entry->vcpu_id, __entry->hv_timer_in_use)
);
1366
1367/*
1368 * Tracepoint for kvm_hv_flush_tlb.
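 * Fires for the HvCallFlushVirtualAddressSpace/List hypercalls, which
 * request a TLB flush on the vCPUs selected by processor_mask.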
1369 */
1370TRACE_EVENT(kvm_hv_flush_tlb,
1371	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
1372	TP_ARGS(processor_mask, address_space, flags),
1373
1374	TP_STRUCT__entry(
1375		__field(u64, processor_mask)
1376		__field(u64, address_space)
1377		__field(u64, flags)
1378	),
1379
1380	TP_fast_assign(
1381		__entry->processor_mask = processor_mask;
1382		__entry->address_space = address_space;
1383		__entry->flags = flags;
1384	),
1385
1386	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
1387		  __entry->processor_mask, __entry->address_space,
1388		  __entry->flags)
1389);
1390
1391/*
1392 * Tracepoint for kvm_hv_flush_tlb_ex.
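 * Fires for the Ex variants of the flush hypercalls, which describe the
 * target vCPUs as a sparse set selected by valid_bank_mask and format.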
1393 */
1394TRACE_EVENT(kvm_hv_flush_tlb_ex,
1395	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
1396	TP_ARGS(valid_bank_mask, format, address_space, flags),
1397
1398	TP_STRUCT__entry(
1399		__field(u64, valid_bank_mask)
1400		__field(u64, format)
1401		__field(u64, address_space)
1402		__field(u64, flags)
1403	),
1404
1405	TP_fast_assign(
1406		__entry->valid_bank_mask = valid_bank_mask;
1407		__entry->format = format;
1408		__entry->address_space = address_space;
1409		__entry->flags = flags;
1410	),
1411
1412	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
1413		  "address_space 0x%llx flags 0x%llx",
1414		  __entry->valid_bank_mask, __entry->format,
1415		  __entry->address_space, __entry->flags)
1416);
1417
1418/*
1419 * Tracepoints for kvm_hv_send_ipi.
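 * These correspond to the HvCallSendSyntheticClusterIpi and
 * HvCallSendSyntheticClusterIpiEx hypercalls.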
1420 */
1421TRACE_EVENT(kvm_hv_send_ipi,
1422	TP_PROTO(u32 vector, u64 processor_mask),
1423	TP_ARGS(vector, processor_mask),
1424
1425	TP_STRUCT__entry(
1426		__field(u32, vector)
1427		__field(u64, processor_mask)
1428	),
1429
1430	TP_fast_assign(
1431		__entry->vector = vector;
1432		__entry->processor_mask = processor_mask;
1433	),
1434
1435	TP_printk("vector %x processor_mask 0x%llx",
1436		  __entry->vector, __entry->processor_mask)
1437);
1438
1439TRACE_EVENT(kvm_hv_send_ipi_ex,
1440	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
1441	TP_ARGS(vector, format, valid_bank_mask),
1442
1443	TP_STRUCT__entry(
1444		__field(u32, vector)
1445		__field(u64, format)
1446		__field(u64, valid_bank_mask)
1447	),
1448
1449	TP_fast_assign(
1450		__entry->vector = vector;
1451		__entry->format = format;
1452		__entry->valid_bank_mask = valid_bank_mask;
1453	),
1454
1455	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
1456		  __entry->vector, __entry->format,
1457		  __entry->valid_bank_mask)
1458);
1459
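/*
 * Tracepoint for KVM's paravirtual TLB flush: records whether a remote
 * flush request was still pending when the vCPU was scheduled back in.
 */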
1460TRACE_EVENT(kvm_pv_tlb_flush,
1461	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
1462	TP_ARGS(vcpu_id, need_flush_tlb),
1463
1464	TP_STRUCT__entry(
1465		__field(	unsigned int,	vcpu_id		)
1466		__field(	bool,	need_flush_tlb		)
1467	),
1468
1469	TP_fast_assign(
1470		__entry->vcpu_id	= vcpu_id;
1471		__entry->need_flush_tlb = need_flush_tlb;
1472	),
1473
1474	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
1475		__entry->need_flush_tlb ? "true" : "false")
1476);
1477
1478/*
1479 * Tracepoint for failed nested VMX VM-Enter.
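 * 'err', when non-zero, is the VM-instruction error number and is decoded
 * via VMX_VMENTER_INSTRUCTION_ERRORS in the trace output.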
1480 */
1481TRACE_EVENT(kvm_nested_vmenter_failed,
1482	TP_PROTO(const char *msg, u32 err),
1483	TP_ARGS(msg, err),
1484
1485	TP_STRUCT__entry(
1486		__field(const char *, msg)
1487		__field(u32, err)
1488	),
1489
1490	TP_fast_assign(
1491		__entry->msg = msg;
1492		__entry->err = err;
1493	),
1494
1495	TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
1496		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
1497);
1498
1499#endif /* _TRACE_KVM_H */
1500
1501#undef TRACE_INCLUDE_PATH
1502#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1503#undef TRACE_INCLUDE_FILE
1504#define TRACE_INCLUDE_FILE trace
1505
1506/* This part must be outside protection */
1507#include <trace/define_trace.h>