arch/x86/kvm/trace.h (v3.15)
  1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  2#define _TRACE_KVM_H
  3
  4#include <linux/tracepoint.h>
  5#include <asm/vmx.h>
  6#include <asm/svm.h>
  7#include <asm/clocksource.h>
  8
  9#undef TRACE_SYSTEM
 10#define TRACE_SYSTEM kvm
 11
 12/*
 13 * Tracepoint for guest mode entry.
 14 */
 15TRACE_EVENT(kvm_entry,
 16	TP_PROTO(unsigned int vcpu_id),
 17	TP_ARGS(vcpu_id),
 18
 19	TP_STRUCT__entry(
 20		__field(	unsigned int,	vcpu_id		)
 21	),
 22
 23	TP_fast_assign(
 24		__entry->vcpu_id	= vcpu_id;
 25	),
 26
 27	TP_printk("vcpu %u", __entry->vcpu_id)
 28);
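
Each TRACE_EVENT() in this file expands into a trace_<name>() helper that KVM calls at the corresponding point: TP_PROTO/TP_ARGS define that helper's signature, TP_STRUCT__entry lists the fields saved into the trace ring buffer, TP_fast_assign fills them in, and TP_printk formats them for output. As an illustrative sketch only (the call site below is not part of this header), the event above would be emitted roughly as:

	/* in the vcpu run path, just before entering the guest */
	trace_kvm_entry(vcpu->vcpu_id);
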
 29
 30/*
 31 * Tracepoint for hypercall.
 32 */
 33TRACE_EVENT(kvm_hypercall,
 34	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 35		 unsigned long a2, unsigned long a3),
 36	TP_ARGS(nr, a0, a1, a2, a3),
 37
 38	TP_STRUCT__entry(
 39		__field(	unsigned long, 	nr		)
 40		__field(	unsigned long,	a0		)
 41		__field(	unsigned long,	a1		)
 42		__field(	unsigned long,	a2		)
 43		__field(	unsigned long,	a3		)
 44	),
 45
 46	TP_fast_assign(
 47		__entry->nr		= nr;
 48		__entry->a0		= a0;
 49		__entry->a1		= a1;
 50		__entry->a2		= a2;
 51		__entry->a3		= a3;
 52	),
 53
 54	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
 55		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 56		 __entry->a3)
 57);
 58
 59/*
 60 * Tracepoint for Hyper-V hypercall.
 61 */
 62TRACE_EVENT(kvm_hv_hypercall,
 63	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
 64		 __u64 ingpa, __u64 outgpa),
 65	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 66
 67	TP_STRUCT__entry(
 68		__field(	__u16,		rep_cnt		)
 69		__field(	__u16,		rep_idx		)
 70		__field(	__u64,		ingpa		)
 71		__field(	__u64,		outgpa		)
 72		__field(	__u16, 		code		)
 73		__field(	bool,		fast		)
 74	),
 75
 76	TP_fast_assign(
 77		__entry->rep_cnt	= rep_cnt;
 78		__entry->rep_idx	= rep_idx;
 79		__entry->ingpa		= ingpa;
 80		__entry->outgpa		= outgpa;
 81		__entry->code		= code;
 82		__entry->fast		= fast;
 83	),
 84
 85	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
 86		  __entry->code, __entry->fast ? "fast" : "slow",
 87		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
 88		  __entry->outgpa)
 89);
 90
 91/*
 92 * Tracepoint for PIO.
 93 */
 94TRACE_EVENT(kvm_pio,
 95	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 96		 unsigned int count),
 97	TP_ARGS(rw, port, size, count),
 98
 99	TP_STRUCT__entry(
100		__field(	unsigned int, 	rw		)
101		__field(	unsigned int, 	port		)
102		__field(	unsigned int, 	size		)
103		__field(	unsigned int,	count		)
104	),
105
106	TP_fast_assign(
107		__entry->rw		= rw;
108		__entry->port		= port;
109		__entry->size		= size;
110		__entry->count		= count;
111	),
112
113	TP_printk("pio_%s at 0x%x size %d count %d",
114		  __entry->rw ? "write" : "read",
115		  __entry->port, __entry->size, __entry->count)
116);
117
118/*
119 * Tracepoint for cpuid.
120 */
121TRACE_EVENT(kvm_cpuid,
122	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
123		 unsigned long rcx, unsigned long rdx),
124	TP_ARGS(function, rax, rbx, rcx, rdx),
125
126	TP_STRUCT__entry(
127		__field(	unsigned int,	function	)
128		__field(	unsigned long,	rax		)
129		__field(	unsigned long,	rbx		)
130		__field(	unsigned long,	rcx		)
131		__field(	unsigned long,	rdx		)
132	),
133
134	TP_fast_assign(
135		__entry->function	= function;
136		__entry->rax		= rax;
137		__entry->rbx		= rbx;
138		__entry->rcx		= rcx;
139		__entry->rdx		= rdx;
140	),
141
142	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
143		  __entry->function, __entry->rax,
144		  __entry->rbx, __entry->rcx, __entry->rdx)
145);
146
147#define AREG(x) { APIC_##x, "APIC_" #x }
148
149#define kvm_trace_symbol_apic						    \
150	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
151	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
152	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
153	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
154	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
155	AREG(ECTRL)
156/*
157 * Tracepoint for apic access.
158 */
159TRACE_EVENT(kvm_apic,
160	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
161	TP_ARGS(rw, reg, val),
162
163	TP_STRUCT__entry(
164		__field(	unsigned int,	rw		)
165		__field(	unsigned int,	reg		)
166		__field(	unsigned int,	val		)
167	),
168
169	TP_fast_assign(
170		__entry->rw		= rw;
171		__entry->reg		= reg;
172		__entry->val		= val;
173	),
174
175	TP_printk("apic_%s %s = 0x%x",
176		  __entry->rw ? "write" : "read",
177		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
178		  __entry->val)
179);
180
181#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
182#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
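
The rw argument of kvm_apic folds register reads and writes into one event, and the two wrappers above are what the local APIC emulation is expected to call. A minimal, illustrative use (the value val and the surrounding code are assumed; only the macro and APIC_ICR come from the kernel headers):

	/* record a traced write to the APIC interrupt command register */
	trace_kvm_apic_write(APIC_ICR, val);
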
183
184#define KVM_ISA_VMX   1
185#define KVM_ISA_SVM   2
186
187/*
188 * Tracepoint for kvm guest exit:
189 */
190TRACE_EVENT(kvm_exit,
191	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
192	TP_ARGS(exit_reason, vcpu, isa),
193
194	TP_STRUCT__entry(
195		__field(	unsigned int,	exit_reason	)
196		__field(	unsigned long,	guest_rip	)
197		__field(	u32,	        isa             )
198		__field(	u64,	        info1           )
199		__field(	u64,	        info2           )
200	),
201
202	TP_fast_assign(
203		__entry->exit_reason	= exit_reason;
204		__entry->guest_rip	= kvm_rip_read(vcpu);
205		__entry->isa            = isa;
206		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
207					   &__entry->info2);
208	),
209
210	TP_printk("reason %s rip 0x%lx info %llx %llx",
211		 (__entry->isa == KVM_ISA_VMX) ?
212		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
213		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
214		 __entry->guest_rip, __entry->info1, __entry->info2)
215);
216
217/*
218 * Tracepoint for kvm interrupt injection:
219 */
220TRACE_EVENT(kvm_inj_virq,
221	TP_PROTO(unsigned int irq),
222	TP_ARGS(irq),
223
224	TP_STRUCT__entry(
225		__field(	unsigned int,	irq		)
226	),
227
228	TP_fast_assign(
229		__entry->irq		= irq;
230	),
231
232	TP_printk("irq %u", __entry->irq)
233);
234
235#define EXS(x) { x##_VECTOR, "#" #x }
236
237#define kvm_trace_sym_exc						\
238	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
239	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
240	EXS(MF), EXS(MC)
241
242/*
243 * Tracepoint for kvm exception injection:
244 */
245TRACE_EVENT(kvm_inj_exception,
246	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
247	TP_ARGS(exception, has_error, error_code),
248
249	TP_STRUCT__entry(
250		__field(	u8,	exception	)
251		__field(	u8,	has_error	)
252		__field(	u32,	error_code	)
253	),
254
255	TP_fast_assign(
256		__entry->exception	= exception;
257		__entry->has_error	= has_error;
258		__entry->error_code	= error_code;
259	),
260
261	TP_printk("%s (0x%x)",
262		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
263		  /* FIXME: don't print error_code if not present */
264		  __entry->has_error ? __entry->error_code : 0)
265);
266
267/*
268 * Tracepoint for page fault.
269 */
270TRACE_EVENT(kvm_page_fault,
271	TP_PROTO(unsigned long fault_address, unsigned int error_code),
272	TP_ARGS(fault_address, error_code),
273
274	TP_STRUCT__entry(
275		__field(	unsigned long,	fault_address	)
276		__field(	unsigned int,	error_code	)
277	),
278
279	TP_fast_assign(
280		__entry->fault_address	= fault_address;
281		__entry->error_code	= error_code;
282	),
283
284	TP_printk("address %lx error_code %x",
285		  __entry->fault_address, __entry->error_code)
286);
287
288/*
289 * Tracepoint for guest MSR access.
290 */
291TRACE_EVENT(kvm_msr,
292	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
293	TP_ARGS(write, ecx, data, exception),
294
295	TP_STRUCT__entry(
296		__field(	unsigned,	write		)
297		__field(	u32,		ecx		)
298		__field(	u64,		data		)
299		__field(	u8,		exception	)
300	),
301
302	TP_fast_assign(
303		__entry->write		= write;
304		__entry->ecx		= ecx;
305		__entry->data		= data;
306		__entry->exception	= exception;
307	),
308
309	TP_printk("msr_%s %x = 0x%llx%s",
310		  __entry->write ? "write" : "read",
311		  __entry->ecx, __entry->data,
312		  __entry->exception ? " (#GP)" : "")
313);
314
315#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
316#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
317#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
318#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
319
320/*
321 * Tracepoint for guest CR access.
322 */
323TRACE_EVENT(kvm_cr,
324	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
325	TP_ARGS(rw, cr, val),
326
327	TP_STRUCT__entry(
328		__field(	unsigned int,	rw		)
329		__field(	unsigned int,	cr		)
330		__field(	unsigned long,	val		)
331	),
332
333	TP_fast_assign(
334		__entry->rw		= rw;
335		__entry->cr		= cr;
336		__entry->val		= val;
337	),
338
339	TP_printk("cr_%s %x = 0x%lx",
340		  __entry->rw ? "write" : "read",
341		  __entry->cr, __entry->val)
342);
343
344#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
345#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
346
347TRACE_EVENT(kvm_pic_set_irq,
348	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
349	    TP_ARGS(chip, pin, elcr, imr, coalesced),
350
351	TP_STRUCT__entry(
352		__field(	__u8,		chip		)
353		__field(	__u8,		pin		)
354		__field(	__u8,		elcr		)
355		__field(	__u8,		imr		)
356		__field(	bool,		coalesced	)
357	),
358
359	TP_fast_assign(
360		__entry->chip		= chip;
361		__entry->pin		= pin;
362		__entry->elcr		= elcr;
363		__entry->imr		= imr;
364		__entry->coalesced	= coalesced;
365	),
366
367	TP_printk("chip %u pin %u (%s%s)%s",
368		  __entry->chip, __entry->pin,
369		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
370		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
371		  __entry->coalesced ? " (coalesced)" : "")
372);
373
374#define kvm_apic_dst_shorthand		\
375	{0x0, "dst"},			\
376	{0x1, "self"},			\
377	{0x2, "all"},			\
378	{0x3, "all-but-self"}
379
380TRACE_EVENT(kvm_apic_ipi,
381	    TP_PROTO(__u32 icr_low, __u32 dest_id),
382	    TP_ARGS(icr_low, dest_id),
383
384	TP_STRUCT__entry(
385		__field(	__u32,		icr_low		)
386		__field(	__u32,		dest_id		)
387	),
388
389	TP_fast_assign(
390		__entry->icr_low	= icr_low;
391		__entry->dest_id	= dest_id;
392	),
393
394	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
395		  __entry->dest_id, (u8)__entry->icr_low,
396		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
397				   kvm_deliver_mode),
398		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
399		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
400		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
401		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
402				   kvm_apic_dst_shorthand))
403);
404
405TRACE_EVENT(kvm_apic_accept_irq,
406	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
407	    TP_ARGS(apicid, dm, tm, vec, coalesced),
408
409	TP_STRUCT__entry(
410		__field(	__u32,		apicid		)
411		__field(	__u16,		dm		)
412		__field(	__u8,		tm		)
413		__field(	__u8,		vec		)
414		__field(	bool,		coalesced	)
415	),
416
417	TP_fast_assign(
418		__entry->apicid		= apicid;
419		__entry->dm		= dm;
420		__entry->tm		= tm;
421		__entry->vec		= vec;
422		__entry->coalesced	= coalesced;
423	),
424
425	TP_printk("apicid %x vec %u (%s|%s)%s",
426		  __entry->apicid, __entry->vec,
427		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
428		  __entry->tm ? "level" : "edge",
429		  __entry->coalesced ? " (coalesced)" : "")
430);
431
432TRACE_EVENT(kvm_eoi,
433	    TP_PROTO(struct kvm_lapic *apic, int vector),
434	    TP_ARGS(apic, vector),
435
436	TP_STRUCT__entry(
437		__field(	__u32,		apicid		)
438		__field(	int,		vector		)
439	),
440
441	TP_fast_assign(
442		__entry->apicid		= apic->vcpu->vcpu_id;
443		__entry->vector		= vector;
444	),
445
446	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
447);
448
449TRACE_EVENT(kvm_pv_eoi,
450	    TP_PROTO(struct kvm_lapic *apic, int vector),
451	    TP_ARGS(apic, vector),
452
453	TP_STRUCT__entry(
454		__field(	__u32,		apicid		)
455		__field(	int,		vector		)
456	),
457
458	TP_fast_assign(
459		__entry->apicid		= apic->vcpu->vcpu_id;
460		__entry->vector		= vector;
461	),
462
463	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
464);
465
466/*
467 * Tracepoint for nested VMRUN
468 */
469TRACE_EVENT(kvm_nested_vmrun,
470	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
471		     __u32 event_inj, bool npt),
472	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
473
474	TP_STRUCT__entry(
475		__field(	__u64,		rip		)
476		__field(	__u64,		vmcb		)
477		__field(	__u64,		nested_rip	)
478		__field(	__u32,		int_ctl		)
479		__field(	__u32,		event_inj	)
480		__field(	bool,		npt		)
481	),
482
483	TP_fast_assign(
484		__entry->rip		= rip;
485		__entry->vmcb		= vmcb;
486		__entry->nested_rip	= nested_rip;
487		__entry->int_ctl	= int_ctl;
488		__entry->event_inj	= event_inj;
489		__entry->npt		= npt;
490	),
491
492	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
493		  "event_inj: 0x%08x npt: %s",
494		__entry->rip, __entry->vmcb, __entry->nested_rip,
495		__entry->int_ctl, __entry->event_inj,
496		__entry->npt ? "on" : "off")
497);
498
499TRACE_EVENT(kvm_nested_intercepts,
500	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
501	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
502
503	TP_STRUCT__entry(
504		__field(	__u16,		cr_read		)
505		__field(	__u16,		cr_write	)
506		__field(	__u32,		exceptions	)
507		__field(	__u64,		intercept	)
508	),
509
510	TP_fast_assign(
511		__entry->cr_read	= cr_read;
512		__entry->cr_write	= cr_write;
513		__entry->exceptions	= exceptions;
514		__entry->intercept	= intercept;
515	),
516
517	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
518		__entry->cr_read, __entry->cr_write, __entry->exceptions,
519		__entry->intercept)
520);
521/*
522 * Tracepoint for #VMEXIT while nested
523 */
524TRACE_EVENT(kvm_nested_vmexit,
525	    TP_PROTO(__u64 rip, __u32 exit_code,
526		     __u64 exit_info1, __u64 exit_info2,
527		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
528	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
529		    exit_int_info, exit_int_info_err, isa),
530
531	TP_STRUCT__entry(
532		__field(	__u64,		rip			)
533		__field(	__u32,		exit_code		)
534		__field(	__u64,		exit_info1		)
535		__field(	__u64,		exit_info2		)
536		__field(	__u32,		exit_int_info		)
537		__field(	__u32,		exit_int_info_err	)
538		__field(	__u32,		isa			)
539	),
540
541	TP_fast_assign(
542		__entry->rip			= rip;
543		__entry->exit_code		= exit_code;
544		__entry->exit_info1		= exit_info1;
545		__entry->exit_info2		= exit_info2;
546		__entry->exit_int_info		= exit_int_info;
547		__entry->exit_int_info_err	= exit_int_info_err;
548		__entry->isa			= isa;
549	),
550	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
551		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
552		  __entry->rip,
553		 (__entry->isa == KVM_ISA_VMX) ?
554		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
555		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
556		  __entry->exit_info1, __entry->exit_info2,
557		  __entry->exit_int_info, __entry->exit_int_info_err)
558);
559
560/*
561 * Tracepoint for #VMEXIT reinjected to the guest
562 */
563TRACE_EVENT(kvm_nested_vmexit_inject,
564	    TP_PROTO(__u32 exit_code,
565		     __u64 exit_info1, __u64 exit_info2,
566		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
567	    TP_ARGS(exit_code, exit_info1, exit_info2,
568		    exit_int_info, exit_int_info_err, isa),
569
570	TP_STRUCT__entry(
571		__field(	__u32,		exit_code		)
572		__field(	__u64,		exit_info1		)
573		__field(	__u64,		exit_info2		)
574		__field(	__u32,		exit_int_info		)
575		__field(	__u32,		exit_int_info_err	)
576		__field(	__u32,		isa			)
577	),
578
579	TP_fast_assign(
580		__entry->exit_code		= exit_code;
581		__entry->exit_info1		= exit_info1;
582		__entry->exit_info2		= exit_info2;
583		__entry->exit_int_info		= exit_int_info;
584		__entry->exit_int_info_err	= exit_int_info_err;
585		__entry->isa			= isa;
586	),
587
588	TP_printk("reason: %s ext_inf1: 0x%016llx "
589		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
590		 (__entry->isa == KVM_ISA_VMX) ?
591		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
592		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
593		__entry->exit_info1, __entry->exit_info2,
594		__entry->exit_int_info, __entry->exit_int_info_err)
595);
596
597/*
598 * Tracepoint for nested #vmexit because of interrupt pending
599 */
600TRACE_EVENT(kvm_nested_intr_vmexit,
601	    TP_PROTO(__u64 rip),
602	    TP_ARGS(rip),
603
604	TP_STRUCT__entry(
605		__field(	__u64,	rip	)
606	),
607
608	TP_fast_assign(
609		__entry->rip	=	rip
610	),
611
612	TP_printk("rip: 0x%016llx", __entry->rip)
613);
614
615/*
616 * Tracepoint for the INVLPGA instruction
617 */
618TRACE_EVENT(kvm_invlpga,
619	    TP_PROTO(__u64 rip, int asid, u64 address),
620	    TP_ARGS(rip, asid, address),
621
622	TP_STRUCT__entry(
623		__field(	__u64,	rip	)
624		__field(	int,	asid	)
625		__field(	__u64,	address	)
626	),
627
628	TP_fast_assign(
629		__entry->rip		=	rip;
630		__entry->asid		=	asid;
631		__entry->address	=	address;
632	),
633
634	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
635		  __entry->rip, __entry->asid, __entry->address)
636);
637
638/*
639 * Tracepoint for the SKINIT instruction
640 */
641TRACE_EVENT(kvm_skinit,
642	    TP_PROTO(__u64 rip, __u32 slb),
643	    TP_ARGS(rip, slb),
644
645	TP_STRUCT__entry(
646		__field(	__u64,	rip	)
647		__field(	__u32,	slb	)
648	),
649
650	TP_fast_assign(
651		__entry->rip		=	rip;
652		__entry->slb		=	slb;
653	),
654
655	TP_printk("rip: 0x%016llx slb: 0x%08x",
656		  __entry->rip, __entry->slb)
657);
658
659#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
660#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
661#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
662#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
663
664#define kvm_trace_symbol_emul_flags	                  \
665	{ 0,   			    "real" },		  \
666	{ KVM_EMUL_INSN_F_CR0_PE			  \
667	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
668	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
669	{ KVM_EMUL_INSN_F_CR0_PE			  \
670	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
671	{ KVM_EMUL_INSN_F_CR0_PE			  \
672	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
673
674#define kei_decode_mode(mode) ({			\
675	u8 flags = 0xff;				\
676	switch (mode) {					\
677	case X86EMUL_MODE_REAL:				\
678		flags = 0;				\
679		break;					\
680	case X86EMUL_MODE_VM86:				\
681		flags = KVM_EMUL_INSN_F_EFL_VM;		\
682		break;					\
683	case X86EMUL_MODE_PROT16:			\
684		flags = KVM_EMUL_INSN_F_CR0_PE;		\
685		break;					\
686	case X86EMUL_MODE_PROT32:			\
687		flags = KVM_EMUL_INSN_F_CR0_PE		\
688			| KVM_EMUL_INSN_F_CS_D;		\
689		break;					\
690	case X86EMUL_MODE_PROT64:			\
691		flags = KVM_EMUL_INSN_F_CR0_PE		\
692			| KVM_EMUL_INSN_F_CS_L;		\
693		break;					\
694	}						\
695	flags;						\
696	})
697
698TRACE_EVENT(kvm_emulate_insn,
699	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
700	TP_ARGS(vcpu, failed),
701
702	TP_STRUCT__entry(
703		__field(    __u64, rip                       )
704		__field(    __u32, csbase                    )
705		__field(    __u8,  len                       )
706		__array(    __u8,  insn,    15	             )
707		__field(    __u8,  flags       	   	     )
708		__field(    __u8,  failed                    )
709		),
710
711	TP_fast_assign(
712		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
713		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
714		__entry->len = vcpu->arch.emulate_ctxt._eip
715			       - vcpu->arch.emulate_ctxt.fetch.start;
716		memcpy(__entry->insn,
717		       vcpu->arch.emulate_ctxt.fetch.data,
718		       15);
719		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
720		__entry->failed = failed;
721		),
722
723	TP_printk("%x:%llx:%s (%s)%s",
724		  __entry->csbase, __entry->rip,
725		  __print_hex(__entry->insn, __entry->len),
726		  __print_symbolic(__entry->flags,
727				   kvm_trace_symbol_emul_flags),
728		  __entry->failed ? " failed" : ""
729		)
730	);
731
732#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
733#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
734
735TRACE_EVENT(
736	vcpu_match_mmio,
737	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
738	TP_ARGS(gva, gpa, write, gpa_match),
739
740	TP_STRUCT__entry(
741		__field(gva_t, gva)
742		__field(gpa_t, gpa)
743		__field(bool, write)
744		__field(bool, gpa_match)
745		),
746
747	TP_fast_assign(
748		__entry->gva = gva;
749		__entry->gpa = gpa;
750		__entry->write = write;
751		__entry->gpa_match = gpa_match
752		),
753
754	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
755		  __entry->write ? "Write" : "Read",
756		  __entry->gpa_match ? "GPA" : "GVA")
757);
758
759TRACE_EVENT(kvm_write_tsc_offset,
760	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
761		 __u64 next_tsc_offset),
762	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
763
764	TP_STRUCT__entry(
765		__field( unsigned int,	vcpu_id				)
766		__field(	__u64,	previous_tsc_offset		)
767		__field(	__u64,	next_tsc_offset			)
768	),
769
770	TP_fast_assign(
771		__entry->vcpu_id		= vcpu_id;
772		__entry->previous_tsc_offset	= previous_tsc_offset;
773		__entry->next_tsc_offset	= next_tsc_offset;
774	),
775
776	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
777		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
778);
779
780#ifdef CONFIG_X86_64
781
782#define host_clocks					\
783	{VCLOCK_NONE, "none"},				\
784	{VCLOCK_TSC,  "tsc"},				\
785	{VCLOCK_HPET, "hpet"}				\
786
787TRACE_EVENT(kvm_update_master_clock,
788	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
789	TP_ARGS(use_master_clock, host_clock, offset_matched),
790
791	TP_STRUCT__entry(
792		__field(		bool,	use_master_clock	)
793		__field(	unsigned int,	host_clock		)
794		__field(		bool,	offset_matched		)
795	),
796
797	TP_fast_assign(
798		__entry->use_master_clock	= use_master_clock;
799		__entry->host_clock		= host_clock;
800		__entry->offset_matched		= offset_matched;
801	),
802
803	TP_printk("masterclock %d hostclock %s offsetmatched %u",
804		  __entry->use_master_clock,
805		  __print_symbolic(__entry->host_clock, host_clocks),
806		  __entry->offset_matched)
807);
808
809TRACE_EVENT(kvm_track_tsc,
810	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
811		 unsigned int online_vcpus, bool use_master_clock,
812		 unsigned int host_clock),
813	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
814		host_clock),
815
816	TP_STRUCT__entry(
817		__field(	unsigned int,	vcpu_id			)
818		__field(	unsigned int,	nr_vcpus_matched_tsc	)
819		__field(	unsigned int,	online_vcpus		)
820		__field(	bool,		use_master_clock	)
821		__field(	unsigned int,	host_clock		)
822	),
823
824	TP_fast_assign(
825		__entry->vcpu_id		= vcpu_id;
826		__entry->nr_vcpus_matched_tsc	= nr_matched;
827		__entry->online_vcpus		= online_vcpus;
828		__entry->use_master_clock	= use_master_clock;
829		__entry->host_clock		= host_clock;
830	),
831
832	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
833		  " hostclock %s",
834		  __entry->vcpu_id, __entry->use_master_clock,
835		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
836		  __print_symbolic(__entry->host_clock, host_clocks))
837);
838
839#endif /* CONFIG_X86_64 */
840
841#endif /* _TRACE_KVM_H */
842
843#undef TRACE_INCLUDE_PATH
844#define TRACE_INCLUDE_PATH arch/x86/kvm
845#undef TRACE_INCLUDE_FILE
846#define TRACE_INCLUDE_FILE trace
847
848/* This part must be outside protection */
849#include <trace/define_trace.h>
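
For context, the entry and exit events above bracket every trip into the guest: trace_kvm_entry() fires just before the hardware VM entry and trace_kvm_exit() right after the exit reason is read back. A simplified sketch of that pattern, assuming illustrative helper names (only the trace_* calls and KVM_ISA_VMX come from this header):

	static int example_vcpu_enter_guest(struct kvm_vcpu *vcpu)
	{
		u32 exit_reason;

		trace_kvm_entry(vcpu->vcpu_id);

		/* hardware VMLAUNCH/VMRUN happens here; the exit reason is
		 * read back from the VMCS/VMCB by the (assumed) helper */
		exit_reason = example_run_guest(vcpu);

		trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
		return 0;
	}
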
arch/x86/kvm/trace.h (v3.1)
  1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  2#define _TRACE_KVM_H
  3
  4#include <linux/tracepoint.h>
  5
  6#undef TRACE_SYSTEM
  7#define TRACE_SYSTEM kvm
  8
  9/*
 10 * Tracepoint for guest mode entry.
 11 */
 12TRACE_EVENT(kvm_entry,
 13	TP_PROTO(unsigned int vcpu_id),
 14	TP_ARGS(vcpu_id),
 15
 16	TP_STRUCT__entry(
 17		__field(	unsigned int,	vcpu_id		)
 18	),
 19
 20	TP_fast_assign(
 21		__entry->vcpu_id	= vcpu_id;
 22	),
 23
 24	TP_printk("vcpu %u", __entry->vcpu_id)
 25);
 26
 27/*
 28 * Tracepoint for hypercall.
 29 */
 30TRACE_EVENT(kvm_hypercall,
 31	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 32		 unsigned long a2, unsigned long a3),
 33	TP_ARGS(nr, a0, a1, a2, a3),
 34
 35	TP_STRUCT__entry(
 36		__field(	unsigned long, 	nr		)
 37		__field(	unsigned long,	a0		)
 38		__field(	unsigned long,	a1		)
 39		__field(	unsigned long,	a2		)
 40		__field(	unsigned long,	a3		)
 41	),
 42
 43	TP_fast_assign(
 44		__entry->nr		= nr;
 45		__entry->a0		= a0;
 46		__entry->a1		= a1;
 47		__entry->a2		= a2;
 48		__entry->a3		= a3;
 49	),
 50
 51	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
 52		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 53		 __entry->a3)
 54);
 55
 56/*
 57 * Tracepoint for Hyper-V hypercall.
 58 */
 59TRACE_EVENT(kvm_hv_hypercall,
 60	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
 61		 __u64 ingpa, __u64 outgpa),
 62	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 63
 64	TP_STRUCT__entry(
 65		__field(	__u16,		rep_cnt		)
 66		__field(	__u16,		rep_idx		)
 67		__field(	__u64,		ingpa		)
 68		__field(	__u64,		outgpa		)
 69		__field(	__u16, 		code		)
 70		__field(	bool,		fast		)
 71	),
 72
 73	TP_fast_assign(
 74		__entry->rep_cnt	= rep_cnt;
 75		__entry->rep_idx	= rep_idx;
 76		__entry->ingpa		= ingpa;
 77		__entry->outgpa		= outgpa;
 78		__entry->code		= code;
 79		__entry->fast		= fast;
 80	),
 81
 82	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
 83		  __entry->code, __entry->fast ? "fast" : "slow",
 84		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
 85		  __entry->outgpa)
 86);
 87
 88/*
 89 * Tracepoint for PIO.
 90 */
 91TRACE_EVENT(kvm_pio,
 92	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 93		 unsigned int count),
 94	TP_ARGS(rw, port, size, count),
 95
 96	TP_STRUCT__entry(
 97		__field(	unsigned int, 	rw		)
 98		__field(	unsigned int, 	port		)
 99		__field(	unsigned int, 	size		)
100		__field(	unsigned int,	count		)
101	),
102
103	TP_fast_assign(
104		__entry->rw		= rw;
105		__entry->port		= port;
106		__entry->size		= size;
107		__entry->count		= count;
108	),
109
110	TP_printk("pio_%s at 0x%x size %d count %d",
111		  __entry->rw ? "write" : "read",
112		  __entry->port, __entry->size, __entry->count)
113);
114
115/*
116 * Tracepoint for cpuid.
117 */
118TRACE_EVENT(kvm_cpuid,
119	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
120		 unsigned long rcx, unsigned long rdx),
121	TP_ARGS(function, rax, rbx, rcx, rdx),
122
123	TP_STRUCT__entry(
124		__field(	unsigned int,	function	)
125		__field(	unsigned long,	rax		)
126		__field(	unsigned long,	rbx		)
127		__field(	unsigned long,	rcx		)
128		__field(	unsigned long,	rdx		)
129	),
130
131	TP_fast_assign(
132		__entry->function	= function;
133		__entry->rax		= rax;
134		__entry->rbx		= rbx;
135		__entry->rcx		= rcx;
136		__entry->rdx		= rdx;
137	),
138
139	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
140		  __entry->function, __entry->rax,
141		  __entry->rbx, __entry->rcx, __entry->rdx)
142);
143
144#define AREG(x) { APIC_##x, "APIC_" #x }
145
146#define kvm_trace_symbol_apic						    \
147	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
148	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
149	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
150	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
151	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
152	AREG(ECTRL)
153/*
154 * Tracepoint for apic access.
155 */
156TRACE_EVENT(kvm_apic,
157	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
158	TP_ARGS(rw, reg, val),
159
160	TP_STRUCT__entry(
161		__field(	unsigned int,	rw		)
162		__field(	unsigned int,	reg		)
163		__field(	unsigned int,	val		)
164	),
165
166	TP_fast_assign(
167		__entry->rw		= rw;
168		__entry->reg		= reg;
169		__entry->val		= val;
170	),
171
172	TP_printk("apic_%s %s = 0x%x",
173		  __entry->rw ? "write" : "read",
174		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
175		  __entry->val)
176);
177
178#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
179#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
180
181#define KVM_ISA_VMX   1
182#define KVM_ISA_SVM   2
183
184/*
185 * Tracepoint for kvm guest exit:
186 */
187TRACE_EVENT(kvm_exit,
188	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
189	TP_ARGS(exit_reason, vcpu, isa),
190
191	TP_STRUCT__entry(
192		__field(	unsigned int,	exit_reason	)
193		__field(	unsigned long,	guest_rip	)
194		__field(	u32,	        isa             )
195		__field(	u64,	        info1           )
196		__field(	u64,	        info2           )
197	),
198
199	TP_fast_assign(
200		__entry->exit_reason	= exit_reason;
201		__entry->guest_rip	= kvm_rip_read(vcpu);
202		__entry->isa            = isa;
203		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
204					   &__entry->info2);
205	),
206
207	TP_printk("reason %s rip 0x%lx info %llx %llx",
208		 ftrace_print_symbols_seq(p, __entry->exit_reason,
209					  kvm_x86_ops->exit_reasons_str),
210		 __entry->guest_rip, __entry->info1, __entry->info2)
211);
212
213/*
214 * Tracepoint for kvm interrupt injection:
215 */
216TRACE_EVENT(kvm_inj_virq,
217	TP_PROTO(unsigned int irq),
218	TP_ARGS(irq),
219
220	TP_STRUCT__entry(
221		__field(	unsigned int,	irq		)
222	),
223
224	TP_fast_assign(
225		__entry->irq		= irq;
226	),
227
228	TP_printk("irq %u", __entry->irq)
229);
230
231#define EXS(x) { x##_VECTOR, "#" #x }
232
233#define kvm_trace_sym_exc						\
234	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
235	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
236	EXS(MF), EXS(MC)
237
238/*
239 * Tracepoint for kvm exception injection:
240 */
241TRACE_EVENT(kvm_inj_exception,
242	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
243	TP_ARGS(exception, has_error, error_code),
244
245	TP_STRUCT__entry(
246		__field(	u8,	exception	)
247		__field(	u8,	has_error	)
248		__field(	u32,	error_code	)
249	),
250
251	TP_fast_assign(
252		__entry->exception	= exception;
253		__entry->has_error	= has_error;
254		__entry->error_code	= error_code;
255	),
256
257	TP_printk("%s (0x%x)",
258		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
259		  /* FIXME: don't print error_code if not present */
260		  __entry->has_error ? __entry->error_code : 0)
261);
262
263/*
264 * Tracepoint for page fault.
265 */
266TRACE_EVENT(kvm_page_fault,
267	TP_PROTO(unsigned long fault_address, unsigned int error_code),
268	TP_ARGS(fault_address, error_code),
269
270	TP_STRUCT__entry(
271		__field(	unsigned long,	fault_address	)
272		__field(	unsigned int,	error_code	)
273	),
274
275	TP_fast_assign(
276		__entry->fault_address	= fault_address;
277		__entry->error_code	= error_code;
278	),
279
280	TP_printk("address %lx error_code %x",
281		  __entry->fault_address, __entry->error_code)
282);
283
284/*
285 * Tracepoint for guest MSR access.
286 */
287TRACE_EVENT(kvm_msr,
288	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
289	TP_ARGS(write, ecx, data, exception),
290
291	TP_STRUCT__entry(
292		__field(	unsigned,	write		)
293		__field(	u32,		ecx		)
294		__field(	u64,		data		)
295		__field(	u8,		exception	)
296	),
297
298	TP_fast_assign(
299		__entry->write		= write;
300		__entry->ecx		= ecx;
301		__entry->data		= data;
302		__entry->exception	= exception;
303	),
304
305	TP_printk("msr_%s %x = 0x%llx%s",
306		  __entry->write ? "write" : "read",
307		  __entry->ecx, __entry->data,
308		  __entry->exception ? " (#GP)" : "")
309);
310
311#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
312#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
313#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
314#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
315
316/*
317 * Tracepoint for guest CR access.
318 */
319TRACE_EVENT(kvm_cr,
320	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
321	TP_ARGS(rw, cr, val),
322
323	TP_STRUCT__entry(
324		__field(	unsigned int,	rw		)
325		__field(	unsigned int,	cr		)
326		__field(	unsigned long,	val		)
327	),
328
329	TP_fast_assign(
330		__entry->rw		= rw;
331		__entry->cr		= cr;
332		__entry->val		= val;
333	),
334
335	TP_printk("cr_%s %x = 0x%lx",
336		  __entry->rw ? "write" : "read",
337		  __entry->cr, __entry->val)
338);
339
340#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
341#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
342
343TRACE_EVENT(kvm_pic_set_irq,
344	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
345	    TP_ARGS(chip, pin, elcr, imr, coalesced),
346
347	TP_STRUCT__entry(
348		__field(	__u8,		chip		)
349		__field(	__u8,		pin		)
350		__field(	__u8,		elcr		)
351		__field(	__u8,		imr		)
352		__field(	bool,		coalesced	)
353	),
354
355	TP_fast_assign(
356		__entry->chip		= chip;
357		__entry->pin		= pin;
358		__entry->elcr		= elcr;
359		__entry->imr		= imr;
360		__entry->coalesced	= coalesced;
361	),
362
363	TP_printk("chip %u pin %u (%s%s)%s",
364		  __entry->chip, __entry->pin,
365		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
366		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
367		  __entry->coalesced ? " (coalesced)" : "")
368);
369
370#define kvm_apic_dst_shorthand		\
371	{0x0, "dst"},			\
372	{0x1, "self"},			\
373	{0x2, "all"},			\
374	{0x3, "all-but-self"}
375
376TRACE_EVENT(kvm_apic_ipi,
377	    TP_PROTO(__u32 icr_low, __u32 dest_id),
378	    TP_ARGS(icr_low, dest_id),
379
380	TP_STRUCT__entry(
381		__field(	__u32,		icr_low		)
382		__field(	__u32,		dest_id		)
383	),
384
385	TP_fast_assign(
386		__entry->icr_low	= icr_low;
387		__entry->dest_id	= dest_id;
388	),
389
390	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
391		  __entry->dest_id, (u8)__entry->icr_low,
392		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
393				   kvm_deliver_mode),
394		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
395		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
396		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
397		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
398				   kvm_apic_dst_shorthand))
399);
400
401TRACE_EVENT(kvm_apic_accept_irq,
402	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
403	    TP_ARGS(apicid, dm, tm, vec, coalesced),
404
405	TP_STRUCT__entry(
406		__field(	__u32,		apicid		)
407		__field(	__u16,		dm		)
408		__field(	__u8,		tm		)
409		__field(	__u8,		vec		)
410		__field(	bool,		coalesced	)
411	),
412
413	TP_fast_assign(
414		__entry->apicid		= apicid;
415		__entry->dm		= dm;
416		__entry->tm		= tm;
417		__entry->vec		= vec;
418		__entry->coalesced	= coalesced;
419	),
420
421	TP_printk("apicid %x vec %u (%s|%s)%s",
422		  __entry->apicid, __entry->vec,
423		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
424		  __entry->tm ? "level" : "edge",
425		  __entry->coalesced ? " (coalesced)" : "")
426);
427
428/*
429 * Tracepoint for nested VMRUN
430 */
431TRACE_EVENT(kvm_nested_vmrun,
432	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
433		     __u32 event_inj, bool npt),
434	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
435
436	TP_STRUCT__entry(
437		__field(	__u64,		rip		)
438		__field(	__u64,		vmcb		)
439		__field(	__u64,		nested_rip	)
440		__field(	__u32,		int_ctl		)
441		__field(	__u32,		event_inj	)
442		__field(	bool,		npt		)
443	),
444
445	TP_fast_assign(
446		__entry->rip		= rip;
447		__entry->vmcb		= vmcb;
448		__entry->nested_rip	= nested_rip;
449		__entry->int_ctl	= int_ctl;
450		__entry->event_inj	= event_inj;
451		__entry->npt		= npt;
452	),
453
454	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
455		  "event_inj: 0x%08x npt: %s",
456		__entry->rip, __entry->vmcb, __entry->nested_rip,
457		__entry->int_ctl, __entry->event_inj,
458		__entry->npt ? "on" : "off")
459);
460
461TRACE_EVENT(kvm_nested_intercepts,
462	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
463	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
464
465	TP_STRUCT__entry(
466		__field(	__u16,		cr_read		)
467		__field(	__u16,		cr_write	)
468		__field(	__u32,		exceptions	)
469		__field(	__u64,		intercept	)
470	),
471
472	TP_fast_assign(
473		__entry->cr_read	= cr_read;
474		__entry->cr_write	= cr_write;
475		__entry->exceptions	= exceptions;
476		__entry->intercept	= intercept;
477	),
478
479	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
480		__entry->cr_read, __entry->cr_write, __entry->exceptions,
481		__entry->intercept)
482);
483/*
484 * Tracepoint for #VMEXIT while nested
485 */
486TRACE_EVENT(kvm_nested_vmexit,
487	    TP_PROTO(__u64 rip, __u32 exit_code,
488		     __u64 exit_info1, __u64 exit_info2,
489		     __u32 exit_int_info, __u32 exit_int_info_err),
490	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
491		    exit_int_info, exit_int_info_err),
492
493	TP_STRUCT__entry(
494		__field(	__u64,		rip			)
495		__field(	__u32,		exit_code		)
496		__field(	__u64,		exit_info1		)
497		__field(	__u64,		exit_info2		)
498		__field(	__u32,		exit_int_info		)
499		__field(	__u32,		exit_int_info_err	)
500	),
501
502	TP_fast_assign(
503		__entry->rip			= rip;
504		__entry->exit_code		= exit_code;
505		__entry->exit_info1		= exit_info1;
506		__entry->exit_info2		= exit_info2;
507		__entry->exit_int_info		= exit_int_info;
508		__entry->exit_int_info_err	= exit_int_info_err;
509	),
510	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
511		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
512		  __entry->rip,
513		  ftrace_print_symbols_seq(p, __entry->exit_code,
514					   kvm_x86_ops->exit_reasons_str),
515		  __entry->exit_info1, __entry->exit_info2,
516		  __entry->exit_int_info, __entry->exit_int_info_err)
517);
518
519/*
520 * Tracepoint for #VMEXIT reinjected to the guest
521 */
522TRACE_EVENT(kvm_nested_vmexit_inject,
523	    TP_PROTO(__u32 exit_code,
524		     __u64 exit_info1, __u64 exit_info2,
525		     __u32 exit_int_info, __u32 exit_int_info_err),
526	    TP_ARGS(exit_code, exit_info1, exit_info2,
527		    exit_int_info, exit_int_info_err),
528
529	TP_STRUCT__entry(
530		__field(	__u32,		exit_code		)
531		__field(	__u64,		exit_info1		)
532		__field(	__u64,		exit_info2		)
533		__field(	__u32,		exit_int_info		)
534		__field(	__u32,		exit_int_info_err	)
535	),
536
537	TP_fast_assign(
538		__entry->exit_code		= exit_code;
539		__entry->exit_info1		= exit_info1;
540		__entry->exit_info2		= exit_info2;
541		__entry->exit_int_info		= exit_int_info;
542		__entry->exit_int_info_err	= exit_int_info_err;
543	),
544
545	TP_printk("reason: %s ext_inf1: 0x%016llx "
546		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
547		  ftrace_print_symbols_seq(p, __entry->exit_code,
548					   kvm_x86_ops->exit_reasons_str),
549		__entry->exit_info1, __entry->exit_info2,
550		__entry->exit_int_info, __entry->exit_int_info_err)
551);
552
553/*
554 * Tracepoint for nested #vmexit because of interrupt pending
555 */
556TRACE_EVENT(kvm_nested_intr_vmexit,
557	    TP_PROTO(__u64 rip),
558	    TP_ARGS(rip),
559
560	TP_STRUCT__entry(
561		__field(	__u64,	rip	)
562	),
563
564	TP_fast_assign(
565		__entry->rip	=	rip
566	),
567
568	TP_printk("rip: 0x%016llx", __entry->rip)
569);
570
571/*
572 * Tracepoint for the INVLPGA instruction
573 */
574TRACE_EVENT(kvm_invlpga,
575	    TP_PROTO(__u64 rip, int asid, u64 address),
576	    TP_ARGS(rip, asid, address),
577
578	TP_STRUCT__entry(
579		__field(	__u64,	rip	)
580		__field(	int,	asid	)
581		__field(	__u64,	address	)
582	),
583
584	TP_fast_assign(
585		__entry->rip		=	rip;
586		__entry->asid		=	asid;
587		__entry->address	=	address;
588	),
589
590	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
591		  __entry->rip, __entry->asid, __entry->address)
592);
593
594/*
595 * Tracepoint for the SKINIT instruction
596 */
597TRACE_EVENT(kvm_skinit,
598	    TP_PROTO(__u64 rip, __u32 slb),
599	    TP_ARGS(rip, slb),
600
601	TP_STRUCT__entry(
602		__field(	__u64,	rip	)
603		__field(	__u32,	slb	)
604	),
605
606	TP_fast_assign(
607		__entry->rip		=	rip;
608		__entry->slb		=	slb;
609	),
610
611	TP_printk("rip: 0x%016llx slb: 0x%08x",
612		  __entry->rip, __entry->slb)
613);
614
615#define __print_insn(insn, ilen) ({		                 \
616	int i;							 \
617	const char *ret = p->buffer + p->len;			 \
618								 \
619	for (i = 0; i < ilen; ++i)				 \
620		trace_seq_printf(p, " %02x", insn[i]);		 \
621	trace_seq_printf(p, "%c", 0);				 \
622	ret;							 \
623	})
624
625#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
626#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
627#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
628#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
629
630#define kvm_trace_symbol_emul_flags	                  \
631	{ 0,   			    "real" },		  \
632	{ KVM_EMUL_INSN_F_CR0_PE			  \
633	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
634	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
635	{ KVM_EMUL_INSN_F_CR0_PE			  \
636	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
637	{ KVM_EMUL_INSN_F_CR0_PE			  \
638	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
639
640#define kei_decode_mode(mode) ({			\
641	u8 flags = 0xff;				\
642	switch (mode) {					\
643	case X86EMUL_MODE_REAL:				\
644		flags = 0;				\
645		break;					\
646	case X86EMUL_MODE_VM86:				\
647		flags = KVM_EMUL_INSN_F_EFL_VM;		\
648		break;					\
649	case X86EMUL_MODE_PROT16:			\
650		flags = KVM_EMUL_INSN_F_CR0_PE;		\
651		break;					\
652	case X86EMUL_MODE_PROT32:			\
653		flags = KVM_EMUL_INSN_F_CR0_PE		\
654			| KVM_EMUL_INSN_F_CS_D;		\
655		break;					\
656	case X86EMUL_MODE_PROT64:			\
657		flags = KVM_EMUL_INSN_F_CR0_PE		\
658			| KVM_EMUL_INSN_F_CS_L;		\
659		break;					\
660	}						\
661	flags;						\
662	})
663
664TRACE_EVENT(kvm_emulate_insn,
665	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
666	TP_ARGS(vcpu, failed),
667
668	TP_STRUCT__entry(
669		__field(    __u64, rip                       )
670		__field(    __u32, csbase                    )
671		__field(    __u8,  len                       )
672		__array(    __u8,  insn,    15	             )
673		__field(    __u8,  flags       	   	     )
674		__field(    __u8,  failed                    )
675		),
676
677	TP_fast_assign(
678		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
679		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
680		__entry->len = vcpu->arch.emulate_ctxt._eip
681			       - vcpu->arch.emulate_ctxt.fetch.start;
682		memcpy(__entry->insn,
683		       vcpu->arch.emulate_ctxt.fetch.data,
684		       15);
685		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
686		__entry->failed = failed;
687		),
688
689	TP_printk("%x:%llx:%s (%s)%s",
690		  __entry->csbase, __entry->rip,
691		  __print_insn(__entry->insn, __entry->len),
692		  __print_symbolic(__entry->flags,
693				   kvm_trace_symbol_emul_flags),
694		  __entry->failed ? " failed" : ""
695		)
696	);
697
698#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
699#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
700
701TRACE_EVENT(
702	vcpu_match_mmio,
703	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
704	TP_ARGS(gva, gpa, write, gpa_match),
705
706	TP_STRUCT__entry(
707		__field(gva_t, gva)
708		__field(gpa_t, gpa)
709		__field(bool, write)
710		__field(bool, gpa_match)
711		),
712
713	TP_fast_assign(
714		__entry->gva = gva;
715		__entry->gpa = gpa;
716		__entry->write = write;
717		__entry->gpa_match = gpa_match
718		),
719
720	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
721		  __entry->write ? "Write" : "Read",
722		  __entry->gpa_match ? "GPA" : "GVA")
723);
724#endif /* _TRACE_KVM_H */
725
726#undef TRACE_INCLUDE_PATH
727#define TRACE_INCLUDE_PATH arch/x86/kvm
728#undef TRACE_INCLUDE_FILE
729#define TRACE_INCLUDE_FILE trace
730
731/* This part must be outside protection */
732#include <trace/define_trace.h>