arch/x86/kvm/trace.h (v3.15)
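This is the x86 KVM tracepoint header, shown here for two kernel versions. Each TRACE_EVENT() definition expands into a trace_<name>() helper that KVM calls when the corresponding event occurs; because TRACE_SYSTEM is "kvm", the events appear under /sys/kernel/debug/tracing/events/kvm/ and can be enabled individually or captured with tools such as trace-cmd or perf. A minimal sketch of a caller, assuming a hypothetical wrapper around guest entry (the real call sites live in arch/x86/kvm/*.c):

/*
 * Illustrative only: TRACE_EVENT(kvm_entry, ...) below generates
 * trace_kvm_entry(), which is a near-zero-cost no-op unless the
 * kvm:kvm_entry event has been enabled.
 */
static void example_enter_guest(struct kvm_vcpu *vcpu)	/* hypothetical */
{
	trace_kvm_entry(vcpu->vcpu_id);	/* matches TP_PROTO(unsigned int vcpu_id) */
	/* ... actually enter guest mode ... */
}
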
  1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  2#define _TRACE_KVM_H
  3
  4#include <linux/tracepoint.h>
  5#include <asm/vmx.h>
  6#include <asm/svm.h>
  7#include <asm/clocksource.h>
  8
  9#undef TRACE_SYSTEM
 10#define TRACE_SYSTEM kvm
 11
 12/*
 13 * Tracepoint for guest mode entry.
 14 */
 15TRACE_EVENT(kvm_entry,
 16	TP_PROTO(unsigned int vcpu_id),
 17	TP_ARGS(vcpu_id),
 18
 19	TP_STRUCT__entry(
 20		__field(	unsigned int,	vcpu_id		)
 21	),
 22
 23	TP_fast_assign(
 24		__entry->vcpu_id	= vcpu_id;
 25	),
 26
 27	TP_printk("vcpu %u", __entry->vcpu_id)
 28);
 29
 30/*
 31 * Tracepoint for hypercall.
 32 */
 33TRACE_EVENT(kvm_hypercall,
 34	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 35		 unsigned long a2, unsigned long a3),
 36	TP_ARGS(nr, a0, a1, a2, a3),
 37
 38	TP_STRUCT__entry(
 39		__field(	unsigned long, 	nr		)
 40		__field(	unsigned long,	a0		)
 41		__field(	unsigned long,	a1		)
 42		__field(	unsigned long,	a2		)
 43		__field(	unsigned long,	a3		)
 44	),
 45
 46	TP_fast_assign(
 47		__entry->nr		= nr;
 48		__entry->a0		= a0;
 49		__entry->a1		= a1;
 50		__entry->a2		= a2;
 51		__entry->a3		= a3;
 52	),
 53
 54	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
 55		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 56		 __entry->a3)
 57);
 58
 59/*
 60 * Tracepoint for Hyper-V hypercall.
 61 */
 62TRACE_EVENT(kvm_hv_hypercall,
 63	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
 64		 __u64 ingpa, __u64 outgpa),
 65	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 66
 67	TP_STRUCT__entry(
 68		__field(	__u16,		rep_cnt		)
 69		__field(	__u16,		rep_idx		)
 70		__field(	__u64,		ingpa		)
 71		__field(	__u64,		outgpa		)
 72		__field(	__u16, 		code		)
 73		__field(	bool,		fast		)
 74	),
 75
 76	TP_fast_assign(
 77		__entry->rep_cnt	= rep_cnt;
 78		__entry->rep_idx	= rep_idx;
 79		__entry->ingpa		= ingpa;
 80		__entry->outgpa		= outgpa;
 81		__entry->code		= code;
 82		__entry->fast		= fast;
 83	),
 84
 85	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
 86		  __entry->code, __entry->fast ? "fast" : "slow",
 87		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
 88		  __entry->outgpa)
 89);
 90
 91/*
 92 * Tracepoint for PIO.
 93 */
 94TRACE_EVENT(kvm_pio,
 95	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 96		 unsigned int count),
 97	TP_ARGS(rw, port, size, count),
 98
 99	TP_STRUCT__entry(
100		__field(	unsigned int, 	rw		)
101		__field(	unsigned int, 	port		)
102		__field(	unsigned int, 	size		)
103		__field(	unsigned int,	count		)
104	),
105
106	TP_fast_assign(
107		__entry->rw		= rw;
108		__entry->port		= port;
109		__entry->size		= size;
110		__entry->count		= count;
111	),
112
113	TP_printk("pio_%s at 0x%x size %d count %d",
114		  __entry->rw ? "write" : "read",
115		  __entry->port, __entry->size, __entry->count)
116);
117
118/*
119 * Tracepoint for cpuid.
120 */
121TRACE_EVENT(kvm_cpuid,
122	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
123		 unsigned long rcx, unsigned long rdx),
124	TP_ARGS(function, rax, rbx, rcx, rdx),
125
126	TP_STRUCT__entry(
127		__field(	unsigned int,	function	)
128		__field(	unsigned long,	rax		)
129		__field(	unsigned long,	rbx		)
130		__field(	unsigned long,	rcx		)
131		__field(	unsigned long,	rdx		)
132	),
133
134	TP_fast_assign(
135		__entry->function	= function;
136		__entry->rax		= rax;
137		__entry->rbx		= rbx;
138		__entry->rcx		= rcx;
139		__entry->rdx		= rdx;
140	),
141
142	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
143		  __entry->function, __entry->rax,
144		  __entry->rbx, __entry->rcx, __entry->rdx)
145);
146
147#define AREG(x) { APIC_##x, "APIC_" #x }
148
149#define kvm_trace_symbol_apic						    \
150	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
151	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
152	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
153	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
154	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
155	AREG(ECTRL)
156/*
157 * Tracepoint for apic access.
158 */
159TRACE_EVENT(kvm_apic,
160	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
161	TP_ARGS(rw, reg, val),
162
163	TP_STRUCT__entry(
164		__field(	unsigned int,	rw		)
165		__field(	unsigned int,	reg		)
166		__field(	unsigned int,	val		)
167	),
168
169	TP_fast_assign(
170		__entry->rw		= rw;
171		__entry->reg		= reg;
172		__entry->val		= val;
173	),
174
175	TP_printk("apic_%s %s = 0x%x",
176		  __entry->rw ? "write" : "read",
177		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
178		  __entry->val)
179);
180
181#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
182#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
183
184#define KVM_ISA_VMX   1
185#define KVM_ISA_SVM   2
186
187/*
188 * Tracepoint for kvm guest exit:
189 */
190TRACE_EVENT(kvm_exit,
191	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
192	TP_ARGS(exit_reason, vcpu, isa),
193
194	TP_STRUCT__entry(
195		__field(	unsigned int,	exit_reason	)
196		__field(	unsigned long,	guest_rip	)
197		__field(	u32,	        isa             )
198		__field(	u64,	        info1           )
199		__field(	u64,	        info2           )
200	),
201
202	TP_fast_assign(
203		__entry->exit_reason	= exit_reason;
204		__entry->guest_rip	= kvm_rip_read(vcpu);
205		__entry->isa            = isa;
206		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
207					   &__entry->info2);
208	),
209
210	TP_printk("reason %s rip 0x%lx info %llx %llx",
211		 (__entry->isa == KVM_ISA_VMX) ?
212		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
213		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
214		 __entry->guest_rip, __entry->info1, __entry->info2)
215);
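
The isa argument picks the exit-reason table that __print_symbolic() uses when the event is formatted, so both VMX and SVM exits print readable reason names. A sketch of the two vendor-specific call sites (hedged; the exact code lives in vmx.c and svm.c):

/* Illustrative, not the kernel's exact lines: */
trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);	/* from the VMX exit handler */
trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);	/* from the SVM exit handler */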
216
217/*
218 * Tracepoint for kvm interrupt injection:
219 */
220TRACE_EVENT(kvm_inj_virq,
221	TP_PROTO(unsigned int irq),
222	TP_ARGS(irq),
223
224	TP_STRUCT__entry(
225		__field(	unsigned int,	irq		)
226	),
227
228	TP_fast_assign(
229		__entry->irq		= irq;
230	),
231
232	TP_printk("irq %u", __entry->irq)
233);
234
235#define EXS(x) { x##_VECTOR, "#" #x }
236
237#define kvm_trace_sym_exc						\
238	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
239	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
240	EXS(MF), EXS(MC)
241
242/*
243 * Tracepoint for kvm exception injection:
244 */
245TRACE_EVENT(kvm_inj_exception,
246	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
247	TP_ARGS(exception, has_error, error_code),
248
249	TP_STRUCT__entry(
250		__field(	u8,	exception	)
251		__field(	u8,	has_error	)
252		__field(	u32,	error_code	)
253	),
254
255	TP_fast_assign(
256		__entry->exception	= exception;
257		__entry->has_error	= has_error;
258		__entry->error_code	= error_code;
259	),
260
261	TP_printk("%s (0x%x)",
262		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
263		  /* FIXME: don't print error_code if not present */
264		  __entry->has_error ? __entry->error_code : 0)
265);
266
267/*
268 * Tracepoint for page fault.
269 */
270TRACE_EVENT(kvm_page_fault,
271	TP_PROTO(unsigned long fault_address, unsigned int error_code),
272	TP_ARGS(fault_address, error_code),
273
274	TP_STRUCT__entry(
275		__field(	unsigned long,	fault_address	)
276		__field(	unsigned int,	error_code	)
277	),
278
279	TP_fast_assign(
280		__entry->fault_address	= fault_address;
281		__entry->error_code	= error_code;
282	),
283
284	TP_printk("address %lx error_code %x",
285		  __entry->fault_address, __entry->error_code)
286);
287
288/*
289 * Tracepoint for guest MSR access.
290 */
291TRACE_EVENT(kvm_msr,
292	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
293	TP_ARGS(write, ecx, data, exception),
294
295	TP_STRUCT__entry(
296		__field(	unsigned,	write		)
297		__field(	u32,		ecx		)
298		__field(	u64,		data		)
299		__field(	u8,		exception	)
300	),
301
302	TP_fast_assign(
303		__entry->write		= write;
304		__entry->ecx		= ecx;
305		__entry->data		= data;
306		__entry->exception	= exception;
307	),
308
309	TP_printk("msr_%s %x = 0x%llx%s",
310		  __entry->write ? "write" : "read",
311		  __entry->ecx, __entry->data,
312		  __entry->exception ? " (#GP)" : "")
313);
314
315#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
316#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
317#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
318#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
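
These wrappers funnel plain reads, plain writes and the #GP-faulting variants into the single kvm_msr event. A hypothetical RDMSR emulation path, only to show how they pair up (kvm_get_msr() is assumed here; the real emulation code is more involved):

static int example_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 ecx)
{
	u64 data;

	if (kvm_get_msr(vcpu, ecx, &data)) {	/* assumed helper */
		trace_kvm_msr_read_ex(ecx);	/* "msr_read <ecx> = 0x0 (#GP)" */
		return 1;			/* caller injects #GP */
	}
	trace_kvm_msr_read(ecx, data);		/* "msr_read <ecx> = 0x<data>" */
	return 0;
}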
319
320/*
321 * Tracepoint for guest CR access.
322 */
323TRACE_EVENT(kvm_cr,
324	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
325	TP_ARGS(rw, cr, val),
326
327	TP_STRUCT__entry(
328		__field(	unsigned int,	rw		)
329		__field(	unsigned int,	cr		)
330		__field(	unsigned long,	val		)
331	),
332
333	TP_fast_assign(
334		__entry->rw		= rw;
335		__entry->cr		= cr;
336		__entry->val		= val;
337	),
338
339	TP_printk("cr_%s %x = 0x%lx",
340		  __entry->rw ? "write" : "read",
341		  __entry->cr, __entry->val)
342);
343
344#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
345#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
346
347TRACE_EVENT(kvm_pic_set_irq,
348	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
349	    TP_ARGS(chip, pin, elcr, imr, coalesced),
350
351	TP_STRUCT__entry(
352		__field(	__u8,		chip		)
353		__field(	__u8,		pin		)
354		__field(	__u8,		elcr		)
355		__field(	__u8,		imr		)
356		__field(	bool,		coalesced	)
357	),
358
359	TP_fast_assign(
360		__entry->chip		= chip;
361		__entry->pin		= pin;
362		__entry->elcr		= elcr;
363		__entry->imr		= imr;
364		__entry->coalesced	= coalesced;
365	),
366
367	TP_printk("chip %u pin %u (%s%s)%s",
368		  __entry->chip, __entry->pin,
369		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
370		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
371		  __entry->coalesced ? " (coalesced)" : "")
372);
373
374#define kvm_apic_dst_shorthand		\
375	{0x0, "dst"},			\
376	{0x1, "self"},			\
377	{0x2, "all"},			\
378	{0x3, "all-but-self"}
379
380TRACE_EVENT(kvm_apic_ipi,
381	    TP_PROTO(__u32 icr_low, __u32 dest_id),
382	    TP_ARGS(icr_low, dest_id),
383
384	TP_STRUCT__entry(
385		__field(	__u32,		icr_low		)
386		__field(	__u32,		dest_id		)
387	),
388
389	TP_fast_assign(
390		__entry->icr_low	= icr_low;
391		__entry->dest_id	= dest_id;
392	),
393
394	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
395		  __entry->dest_id, (u8)__entry->icr_low,
396		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
397				   kvm_deliver_mode),
398		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
399		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
400		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
401		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
402				   kvm_apic_dst_shorthand))
403);
404
405TRACE_EVENT(kvm_apic_accept_irq,
406	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
407	    TP_ARGS(apicid, dm, tm, vec, coalesced),
408
409	TP_STRUCT__entry(
410		__field(	__u32,		apicid		)
411		__field(	__u16,		dm		)
412		__field(	__u8,		tm		)
413		__field(	__u8,		vec		)
414		__field(	bool,		coalesced	)
415	),
416
417	TP_fast_assign(
418		__entry->apicid		= apicid;
419		__entry->dm		= dm;
420		__entry->tm		= tm;
421		__entry->vec		= vec;
422		__entry->coalesced	= coalesced;
423	),
424
425	TP_printk("apicid %x vec %u (%s|%s)%s",
426		  __entry->apicid, __entry->vec,
427		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
428		  __entry->tm ? "level" : "edge",
429		  __entry->coalesced ? " (coalesced)" : "")
430);
431
432TRACE_EVENT(kvm_eoi,
433	    TP_PROTO(struct kvm_lapic *apic, int vector),
434	    TP_ARGS(apic, vector),
435
436	TP_STRUCT__entry(
437		__field(	__u32,		apicid		)
438		__field(	int,		vector		)
439	),
440
441	TP_fast_assign(
442		__entry->apicid		= apic->vcpu->vcpu_id;
443		__entry->vector		= vector;
444	),
445
446	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
447);
448
449TRACE_EVENT(kvm_pv_eoi,
450	    TP_PROTO(struct kvm_lapic *apic, int vector),
451	    TP_ARGS(apic, vector),
452
453	TP_STRUCT__entry(
454		__field(	__u32,		apicid		)
455		__field(	int,		vector		)
456	),
457
458	TP_fast_assign(
459		__entry->apicid		= apic->vcpu->vcpu_id;
460		__entry->vector		= vector;
461	),
462
463	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
464);
465
466/*
467 * Tracepoint for nested VMRUN
468 */
469TRACE_EVENT(kvm_nested_vmrun,
470	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
471		     __u32 event_inj, bool npt),
472	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
473
474	TP_STRUCT__entry(
475		__field(	__u64,		rip		)
476		__field(	__u64,		vmcb		)
477		__field(	__u64,		nested_rip	)
478		__field(	__u32,		int_ctl		)
479		__field(	__u32,		event_inj	)
480		__field(	bool,		npt		)
481	),
482
483	TP_fast_assign(
484		__entry->rip		= rip;
485		__entry->vmcb		= vmcb;
486		__entry->nested_rip	= nested_rip;
487		__entry->int_ctl	= int_ctl;
488		__entry->event_inj	= event_inj;
489		__entry->npt		= npt;
490	),
491
492	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
493		  "event_inj: 0x%08x npt: %s",
494		__entry->rip, __entry->vmcb, __entry->nested_rip,
495		__entry->int_ctl, __entry->event_inj,
496		__entry->npt ? "on" : "off")
497);
498
499TRACE_EVENT(kvm_nested_intercepts,
500	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
501	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
502
503	TP_STRUCT__entry(
504		__field(	__u16,		cr_read		)
505		__field(	__u16,		cr_write	)
506		__field(	__u32,		exceptions	)
507		__field(	__u64,		intercept	)
508	),
509
510	TP_fast_assign(
511		__entry->cr_read	= cr_read;
512		__entry->cr_write	= cr_write;
513		__entry->exceptions	= exceptions;
514		__entry->intercept	= intercept;
515	),
516
517	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
518		__entry->cr_read, __entry->cr_write, __entry->exceptions,
519		__entry->intercept)
520);
521/*
522 * Tracepoint for #VMEXIT while nested
523 */
524TRACE_EVENT(kvm_nested_vmexit,
525	    TP_PROTO(__u64 rip, __u32 exit_code,
526		     __u64 exit_info1, __u64 exit_info2,
527		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
528	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
529		    exit_int_info, exit_int_info_err, isa),
530
531	TP_STRUCT__entry(
532		__field(	__u64,		rip			)
533		__field(	__u32,		exit_code		)
534		__field(	__u64,		exit_info1		)
535		__field(	__u64,		exit_info2		)
536		__field(	__u32,		exit_int_info		)
537		__field(	__u32,		exit_int_info_err	)
538		__field(	__u32,		isa			)
539	),
540
541	TP_fast_assign(
542		__entry->rip			= rip;
543		__entry->exit_code		= exit_code;
544		__entry->exit_info1		= exit_info1;
545		__entry->exit_info2		= exit_info2;
546		__entry->exit_int_info		= exit_int_info;
547		__entry->exit_int_info_err	= exit_int_info_err;
548		__entry->isa			= isa;
549	),
550	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
551		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
552		  __entry->rip,
553		 (__entry->isa == KVM_ISA_VMX) ?
554		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
555		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
556		  __entry->exit_info1, __entry->exit_info2,
557		  __entry->exit_int_info, __entry->exit_int_info_err)
558);
559
560/*
561 * Tracepoint for #VMEXIT reinjected to the guest
562 */
563TRACE_EVENT(kvm_nested_vmexit_inject,
564	    TP_PROTO(__u32 exit_code,
565		     __u64 exit_info1, __u64 exit_info2,
566		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
567	    TP_ARGS(exit_code, exit_info1, exit_info2,
568		    exit_int_info, exit_int_info_err, isa),
569
570	TP_STRUCT__entry(
571		__field(	__u32,		exit_code		)
572		__field(	__u64,		exit_info1		)
573		__field(	__u64,		exit_info2		)
574		__field(	__u32,		exit_int_info		)
575		__field(	__u32,		exit_int_info_err	)
576		__field(	__u32,		isa			)
577	),
578
579	TP_fast_assign(
580		__entry->exit_code		= exit_code;
581		__entry->exit_info1		= exit_info1;
582		__entry->exit_info2		= exit_info2;
583		__entry->exit_int_info		= exit_int_info;
584		__entry->exit_int_info_err	= exit_int_info_err;
585		__entry->isa			= isa;
586	),
587
588	TP_printk("reason: %s ext_inf1: 0x%016llx "
589		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
590		 (__entry->isa == KVM_ISA_VMX) ?
591		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
592		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
593		__entry->exit_info1, __entry->exit_info2,
594		__entry->exit_int_info, __entry->exit_int_info_err)
595);
596
597/*
598 * Tracepoint for nested #vmexit because of interrupt pending
599 */
600TRACE_EVENT(kvm_nested_intr_vmexit,
601	    TP_PROTO(__u64 rip),
602	    TP_ARGS(rip),
603
604	TP_STRUCT__entry(
605		__field(	__u64,	rip	)
606	),
607
608	TP_fast_assign(
609		__entry->rip	=	rip;
610	),
611
612	TP_printk("rip: 0x%016llx", __entry->rip)
613);
614
615/*
616 * Tracepoint for the INVLPGA instruction
617 */
618TRACE_EVENT(kvm_invlpga,
619	    TP_PROTO(__u64 rip, int asid, u64 address),
620	    TP_ARGS(rip, asid, address),
621
622	TP_STRUCT__entry(
623		__field(	__u64,	rip	)
624		__field(	int,	asid	)
625		__field(	__u64,	address	)
626	),
627
628	TP_fast_assign(
629		__entry->rip		=	rip;
630		__entry->asid		=	asid;
631		__entry->address	=	address;
632	),
633
634	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
635		  __entry->rip, __entry->asid, __entry->address)
636);
637
638/*
639 * Tracepoint for the SKINIT instruction
640 */
641TRACE_EVENT(kvm_skinit,
642	    TP_PROTO(__u64 rip, __u32 slb),
643	    TP_ARGS(rip, slb),
644
645	TP_STRUCT__entry(
646		__field(	__u64,	rip	)
647		__field(	__u32,	slb	)
648	),
649
650	TP_fast_assign(
651		__entry->rip		=	rip;
652		__entry->slb		=	slb;
653	),
654
655	TP_printk("rip: 0x%016llx slb: 0x%08x",
656		  __entry->rip, __entry->slb)
657);
658
659#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
660#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
661#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
662#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
663
664#define kvm_trace_symbol_emul_flags	                  \
665	{ 0,   			    "real" },		  \
666	{ KVM_EMUL_INSN_F_CR0_PE			  \
667	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
668	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
669	{ KVM_EMUL_INSN_F_CR0_PE			  \
670	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
671	{ KVM_EMUL_INSN_F_CR0_PE			  \
672	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
673
674#define kei_decode_mode(mode) ({			\
675	u8 flags = 0xff;				\
676	switch (mode) {					\
677	case X86EMUL_MODE_REAL:				\
678		flags = 0;				\
679		break;					\
680	case X86EMUL_MODE_VM86:				\
681		flags = KVM_EMUL_INSN_F_EFL_VM;		\
682		break;					\
683	case X86EMUL_MODE_PROT16:			\
684		flags = KVM_EMUL_INSN_F_CR0_PE;		\
685		break;					\
686	case X86EMUL_MODE_PROT32:			\
687		flags = KVM_EMUL_INSN_F_CR0_PE		\
688			| KVM_EMUL_INSN_F_CS_D;		\
689		break;					\
690	case X86EMUL_MODE_PROT64:			\
691		flags = KVM_EMUL_INSN_F_CR0_PE		\
692			| KVM_EMUL_INSN_F_CS_L;		\
693		break;					\
694	}						\
695	flags;						\
696	})
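
kei_decode_mode() collapses the emulator's mode into the flag bits above so that __print_symbolic() can render a short mode name in kvm_emulate_insn below. Worked through by hand: X86EMUL_MODE_PROT64 decodes to KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L, which kvm_trace_symbol_emul_flags prints as "prot64"; an unrecognised mode leaves flags at 0xff.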
697
698TRACE_EVENT(kvm_emulate_insn,
699	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
700	TP_ARGS(vcpu, failed),
701
702	TP_STRUCT__entry(
703		__field(    __u64, rip                       )
704		__field(    __u32, csbase                    )
705		__field(    __u8,  len                       )
706		__array(    __u8,  insn,    15	             )
707		__field(    __u8,  flags       	   	     )
708		__field(    __u8,  failed                    )
709		),
710
711	TP_fast_assign(
712		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
713		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
714		__entry->len = vcpu->arch.emulate_ctxt._eip
715			       - vcpu->arch.emulate_ctxt.fetch.start;
716		memcpy(__entry->insn,
717		       vcpu->arch.emulate_ctxt.fetch.data,
718		       15);
719		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
720		__entry->failed = failed;
721		),
722
723	TP_printk("%x:%llx:%s (%s)%s",
724		  __entry->csbase, __entry->rip,
725		  __print_hex(__entry->insn, __entry->len),
726		  __print_symbolic(__entry->flags,
727				   kvm_trace_symbol_emul_flags),
728		  __entry->failed ? " failed" : ""
729		)
730	);
731
732#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
733#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
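
A sketch of how the two wrappers bracket the instruction emulator (hypothetical caller; in the kernel the real entry point is x86_emulate_instruction() in arch/x86/kvm/x86.c, and only genuine emulation failures are traced as failed):

static int example_emulate(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_emulate_insn_start(vcpu);		/* failed = 0 */
	rc = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
	if (rc != X86EMUL_CONTINUE)
		trace_kvm_emulate_insn_failed(vcpu);	/* failed = 1 */
	return rc;
}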
734
735TRACE_EVENT(
736	vcpu_match_mmio,
737	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
738	TP_ARGS(gva, gpa, write, gpa_match),
739
740	TP_STRUCT__entry(
741		__field(gva_t, gva)
742		__field(gpa_t, gpa)
743		__field(bool, write)
744		__field(bool, gpa_match)
745		),
746
747	TP_fast_assign(
748		__entry->gva = gva;
749		__entry->gpa = gpa;
750		__entry->write = write;
751		__entry->gpa_match = gpa_match;
752		),
753
754	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
755		  __entry->write ? "Write" : "Read",
756		  __entry->gpa_match ? "GPA" : "GVA")
757);
758
759TRACE_EVENT(kvm_write_tsc_offset,
760	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
761		 __u64 next_tsc_offset),
762	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
763
764	TP_STRUCT__entry(
765		__field( unsigned int,	vcpu_id				)
766		__field(	__u64,	previous_tsc_offset		)
767		__field(	__u64,	next_tsc_offset			)
768	),
769
770	TP_fast_assign(
771		__entry->vcpu_id		= vcpu_id;
772		__entry->previous_tsc_offset	= previous_tsc_offset;
773		__entry->next_tsc_offset	= next_tsc_offset;
774	),
775
776	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
777		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
778);
779
780#ifdef CONFIG_X86_64
781
782#define host_clocks					\
783	{VCLOCK_NONE, "none"},				\
784	{VCLOCK_TSC,  "tsc"},				\
785	{VCLOCK_HPET, "hpet"}				\
786
787TRACE_EVENT(kvm_update_master_clock,
788	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
789	TP_ARGS(use_master_clock, host_clock, offset_matched),
790
791	TP_STRUCT__entry(
792		__field(		bool,	use_master_clock	)
793		__field(	unsigned int,	host_clock		)
794		__field(		bool,	offset_matched		)
795	),
796
797	TP_fast_assign(
798		__entry->use_master_clock	= use_master_clock;
799		__entry->host_clock		= host_clock;
800		__entry->offset_matched		= offset_matched;
801	),
802
803	TP_printk("masterclock %d hostclock %s offsetmatched %u",
804		  __entry->use_master_clock,
805		  __print_symbolic(__entry->host_clock, host_clocks),
806		  __entry->offset_matched)
807);
808
809TRACE_EVENT(kvm_track_tsc,
810	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
811		 unsigned int online_vcpus, bool use_master_clock,
812		 unsigned int host_clock),
813	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
814		host_clock),
815
816	TP_STRUCT__entry(
817		__field(	unsigned int,	vcpu_id			)
818		__field(	unsigned int,	nr_vcpus_matched_tsc	)
819		__field(	unsigned int,	online_vcpus		)
820		__field(	bool,		use_master_clock	)
821		__field(	unsigned int,	host_clock		)
822	),
823
824	TP_fast_assign(
825		__entry->vcpu_id		= vcpu_id;
826		__entry->nr_vcpus_matched_tsc	= nr_matched;
827		__entry->online_vcpus		= online_vcpus;
828		__entry->use_master_clock	= use_master_clock;
829		__entry->host_clock		= host_clock;
830	),
831
832	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
833		  " hostclock %s",
834		  __entry->vcpu_id, __entry->use_master_clock,
835		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
836		  __print_symbolic(__entry->host_clock, host_clocks))
837);
838
839#endif /* CONFIG_X86_64 */
840
841#endif /* _TRACE_KVM_H */
842
843#undef TRACE_INCLUDE_PATH
844#define TRACE_INCLUDE_PATH arch/x86/kvm
845#undef TRACE_INCLUDE_FILE
846#define TRACE_INCLUDE_FILE trace
847
848/* This part must be outside protection */
849#include <trace/define_trace.h>
arch/x86/kvm/trace.h (v3.5.6)
  1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  2#define _TRACE_KVM_H
  3
  4#include <linux/tracepoint.h>
  5#include <asm/vmx.h>
  6#include <asm/svm.h>
  7
  8#undef TRACE_SYSTEM
  9#define TRACE_SYSTEM kvm
 10
 11/*
 12 * Tracepoint for guest mode entry.
 13 */
 14TRACE_EVENT(kvm_entry,
 15	TP_PROTO(unsigned int vcpu_id),
 16	TP_ARGS(vcpu_id),
 17
 18	TP_STRUCT__entry(
 19		__field(	unsigned int,	vcpu_id		)
 20	),
 21
 22	TP_fast_assign(
 23		__entry->vcpu_id	= vcpu_id;
 24	),
 25
 26	TP_printk("vcpu %u", __entry->vcpu_id)
 27);
 28
 29/*
 30 * Tracepoint for hypercall.
 31 */
 32TRACE_EVENT(kvm_hypercall,
 33	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
 34		 unsigned long a2, unsigned long a3),
 35	TP_ARGS(nr, a0, a1, a2, a3),
 36
 37	TP_STRUCT__entry(
 38		__field(	unsigned long, 	nr		)
 39		__field(	unsigned long,	a0		)
 40		__field(	unsigned long,	a1		)
 41		__field(	unsigned long,	a2		)
 42		__field(	unsigned long,	a3		)
 43	),
 44
 45	TP_fast_assign(
 46		__entry->nr		= nr;
 47		__entry->a0		= a0;
 48		__entry->a1		= a1;
 49		__entry->a2		= a2;
 50		__entry->a3		= a3;
 51	),
 52
 53	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
 54		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
 55		 __entry->a3)
 56);
 57
 58/*
 59 * Tracepoint for Hyper-V hypercall.
 60 */
 61TRACE_EVENT(kvm_hv_hypercall,
 62	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
 63		 __u64 ingpa, __u64 outgpa),
 64	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 65
 66	TP_STRUCT__entry(
 67		__field(	__u16,		rep_cnt		)
 68		__field(	__u16,		rep_idx		)
 69		__field(	__u64,		ingpa		)
 70		__field(	__u64,		outgpa		)
 71		__field(	__u16, 		code		)
 72		__field(	bool,		fast		)
 73	),
 74
 75	TP_fast_assign(
 76		__entry->rep_cnt	= rep_cnt;
 77		__entry->rep_idx	= rep_idx;
 78		__entry->ingpa		= ingpa;
 79		__entry->outgpa		= outgpa;
 80		__entry->code		= code;
 81		__entry->fast		= fast;
 82	),
 83
 84	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
 85		  __entry->code, __entry->fast ? "fast" : "slow",
 86		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
 87		  __entry->outgpa)
 88);
 89
 90/*
 91 * Tracepoint for PIO.
 92 */
 93TRACE_EVENT(kvm_pio,
 94	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 95		 unsigned int count),
 96	TP_ARGS(rw, port, size, count),
 97
 98	TP_STRUCT__entry(
 99		__field(	unsigned int, 	rw		)
100		__field(	unsigned int, 	port		)
101		__field(	unsigned int, 	size		)
102		__field(	unsigned int,	count		)
103	),
104
105	TP_fast_assign(
106		__entry->rw		= rw;
107		__entry->port		= port;
108		__entry->size		= size;
109		__entry->count		= count;
110	),
111
112	TP_printk("pio_%s at 0x%x size %d count %d",
113		  __entry->rw ? "write" : "read",
114		  __entry->port, __entry->size, __entry->count)
115);
116
117/*
118 * Tracepoint for cpuid.
119 */
120TRACE_EVENT(kvm_cpuid,
121	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
122		 unsigned long rcx, unsigned long rdx),
123	TP_ARGS(function, rax, rbx, rcx, rdx),
124
125	TP_STRUCT__entry(
126		__field(	unsigned int,	function	)
127		__field(	unsigned long,	rax		)
128		__field(	unsigned long,	rbx		)
129		__field(	unsigned long,	rcx		)
130		__field(	unsigned long,	rdx		)
131	),
132
133	TP_fast_assign(
134		__entry->function	= function;
135		__entry->rax		= rax;
136		__entry->rbx		= rbx;
137		__entry->rcx		= rcx;
138		__entry->rdx		= rdx;
139	),
140
141	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
142		  __entry->function, __entry->rax,
143		  __entry->rbx, __entry->rcx, __entry->rdx)
144);
145
146#define AREG(x) { APIC_##x, "APIC_" #x }
147
148#define kvm_trace_symbol_apic						    \
149	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
150	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
151	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
152	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
153	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
154	AREG(ECTRL)
155/*
156 * Tracepoint for apic access.
157 */
158TRACE_EVENT(kvm_apic,
159	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
160	TP_ARGS(rw, reg, val),
161
162	TP_STRUCT__entry(
163		__field(	unsigned int,	rw		)
164		__field(	unsigned int,	reg		)
165		__field(	unsigned int,	val		)
166	),
167
168	TP_fast_assign(
169		__entry->rw		= rw;
170		__entry->reg		= reg;
171		__entry->val		= val;
172	),
173
174	TP_printk("apic_%s %s = 0x%x",
175		  __entry->rw ? "write" : "read",
176		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
177		  __entry->val)
178);
179
180#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
181#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
182
183#define KVM_ISA_VMX   1
184#define KVM_ISA_SVM   2
185
186#define VMX_EXIT_REASONS \
187	{ EXIT_REASON_EXCEPTION_NMI,		"EXCEPTION_NMI" }, \
188	{ EXIT_REASON_EXTERNAL_INTERRUPT,	"EXTERNAL_INTERRUPT" }, \
189	{ EXIT_REASON_TRIPLE_FAULT,		"TRIPLE_FAULT" }, \
190	{ EXIT_REASON_PENDING_INTERRUPT,	"PENDING_INTERRUPT" }, \
191	{ EXIT_REASON_NMI_WINDOW,		"NMI_WINDOW" }, \
192	{ EXIT_REASON_TASK_SWITCH,		"TASK_SWITCH" }, \
193	{ EXIT_REASON_CPUID,			"CPUID" }, \
194	{ EXIT_REASON_HLT,			"HLT" }, \
195	{ EXIT_REASON_INVLPG,			"INVLPG" }, \
196	{ EXIT_REASON_RDPMC,			"RDPMC" }, \
197	{ EXIT_REASON_RDTSC,			"RDTSC" }, \
198	{ EXIT_REASON_VMCALL,			"VMCALL" }, \
199	{ EXIT_REASON_VMCLEAR,			"VMCLEAR" }, \
200	{ EXIT_REASON_VMLAUNCH,			"VMLAUNCH" }, \
201	{ EXIT_REASON_VMPTRLD,			"VMPTRLD" }, \
202	{ EXIT_REASON_VMPTRST,			"VMPTRST" }, \
203	{ EXIT_REASON_VMREAD,			"VMREAD" }, \
204	{ EXIT_REASON_VMRESUME,			"VMRESUME" }, \
205	{ EXIT_REASON_VMWRITE,			"VMWRITE" }, \
206	{ EXIT_REASON_VMOFF,			"VMOFF" }, \
207	{ EXIT_REASON_VMON,			"VMON" }, \
208	{ EXIT_REASON_CR_ACCESS,		"CR_ACCESS" }, \
209	{ EXIT_REASON_DR_ACCESS,		"DR_ACCESS" }, \
210	{ EXIT_REASON_IO_INSTRUCTION,		"IO_INSTRUCTION" }, \
211	{ EXIT_REASON_MSR_READ,			"MSR_READ" }, \
212	{ EXIT_REASON_MSR_WRITE,		"MSR_WRITE" }, \
213	{ EXIT_REASON_MWAIT_INSTRUCTION,	"MWAIT_INSTRUCTION" }, \
214	{ EXIT_REASON_MONITOR_INSTRUCTION,	"MONITOR_INSTRUCTION" }, \
215	{ EXIT_REASON_PAUSE_INSTRUCTION,	"PAUSE_INSTRUCTION" }, \
216	{ EXIT_REASON_MCE_DURING_VMENTRY,	"MCE_DURING_VMENTRY" }, \
217	{ EXIT_REASON_TPR_BELOW_THRESHOLD,	"TPR_BELOW_THRESHOLD" },	\
218	{ EXIT_REASON_APIC_ACCESS,		"APIC_ACCESS" }, \
219	{ EXIT_REASON_EPT_VIOLATION,		"EPT_VIOLATION" }, \
220	{ EXIT_REASON_EPT_MISCONFIG,		"EPT_MISCONFIG" }, \
221	{ EXIT_REASON_WBINVD,			"WBINVD" }
222
223#define SVM_EXIT_REASONS \
224	{ SVM_EXIT_READ_CR0,			"read_cr0" }, \
225	{ SVM_EXIT_READ_CR3,			"read_cr3" }, \
226	{ SVM_EXIT_READ_CR4,			"read_cr4" }, \
227	{ SVM_EXIT_READ_CR8,			"read_cr8" }, \
228	{ SVM_EXIT_WRITE_CR0,			"write_cr0" }, \
229	{ SVM_EXIT_WRITE_CR3,			"write_cr3" }, \
230	{ SVM_EXIT_WRITE_CR4,			"write_cr4" }, \
231	{ SVM_EXIT_WRITE_CR8,			"write_cr8" }, \
232	{ SVM_EXIT_READ_DR0,			"read_dr0" }, \
233	{ SVM_EXIT_READ_DR1,			"read_dr1" }, \
234	{ SVM_EXIT_READ_DR2,			"read_dr2" }, \
235	{ SVM_EXIT_READ_DR3,			"read_dr3" }, \
236	{ SVM_EXIT_WRITE_DR0,			"write_dr0" }, \
237	{ SVM_EXIT_WRITE_DR1,			"write_dr1" }, \
238	{ SVM_EXIT_WRITE_DR2,			"write_dr2" }, \
239	{ SVM_EXIT_WRITE_DR3,			"write_dr3" }, \
240	{ SVM_EXIT_WRITE_DR5,			"write_dr5" }, \
241	{ SVM_EXIT_WRITE_DR7,			"write_dr7" }, \
242	{ SVM_EXIT_EXCP_BASE + DB_VECTOR,	"DB excp" }, \
243	{ SVM_EXIT_EXCP_BASE + BP_VECTOR,	"BP excp" }, \
244	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,	"UD excp" }, \
245	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp" }, \
246	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,	"NM excp" }, \
247	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,	"MC excp" }, \
248	{ SVM_EXIT_INTR,			"interrupt" }, \
249	{ SVM_EXIT_NMI,				"nmi" }, \
250	{ SVM_EXIT_SMI,				"smi" }, \
251	{ SVM_EXIT_INIT,			"init" }, \
252	{ SVM_EXIT_VINTR,			"vintr" }, \
253	{ SVM_EXIT_CPUID,			"cpuid" }, \
254	{ SVM_EXIT_INVD,			"invd" }, \
255	{ SVM_EXIT_HLT,				"hlt" }, \
256	{ SVM_EXIT_INVLPG,			"invlpg" }, \
257	{ SVM_EXIT_INVLPGA,			"invlpga" }, \
258	{ SVM_EXIT_IOIO,			"io" }, \
259	{ SVM_EXIT_MSR,				"msr" }, \
260	{ SVM_EXIT_TASK_SWITCH,			"task_switch" }, \
261	{ SVM_EXIT_SHUTDOWN,			"shutdown" }, \
262	{ SVM_EXIT_VMRUN,			"vmrun" }, \
263	{ SVM_EXIT_VMMCALL,			"hypercall" }, \
264	{ SVM_EXIT_VMLOAD,			"vmload" }, \
265	{ SVM_EXIT_VMSAVE,			"vmsave" }, \
266	{ SVM_EXIT_STGI,			"stgi" }, \
267	{ SVM_EXIT_CLGI,			"clgi" }, \
268	{ SVM_EXIT_SKINIT,			"skinit" }, \
269	{ SVM_EXIT_WBINVD,			"wbinvd" }, \
270	{ SVM_EXIT_MONITOR,			"monitor" }, \
271	{ SVM_EXIT_MWAIT,			"mwait" }, \
272	{ SVM_EXIT_XSETBV,			"xsetbv" }, \
273	{ SVM_EXIT_NPF,				"npf" }
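
In this older version the VMX_EXIT_REASONS and SVM_EXIT_REASONS string tables are spelled out inline; in the v3.15 copy above they are pulled in through the <asm/vmx.h> and <asm/svm.h> includes instead, which is why that version has no equivalent block.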
274
275/*
276 * Tracepoint for kvm guest exit:
277 */
278TRACE_EVENT(kvm_exit,
279	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
280	TP_ARGS(exit_reason, vcpu, isa),
281
282	TP_STRUCT__entry(
283		__field(	unsigned int,	exit_reason	)
284		__field(	unsigned long,	guest_rip	)
285		__field(	u32,	        isa             )
286		__field(	u64,	        info1           )
287		__field(	u64,	        info2           )
288	),
289
290	TP_fast_assign(
291		__entry->exit_reason	= exit_reason;
292		__entry->guest_rip	= kvm_rip_read(vcpu);
293		__entry->isa            = isa;
294		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
295					   &__entry->info2);
296	),
297
298	TP_printk("reason %s rip 0x%lx info %llx %llx",
299		 (__entry->isa == KVM_ISA_VMX) ?
300		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
301		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
302		 __entry->guest_rip, __entry->info1, __entry->info2)
303);
304
305/*
306 * Tracepoint for kvm interrupt injection:
307 */
308TRACE_EVENT(kvm_inj_virq,
309	TP_PROTO(unsigned int irq),
310	TP_ARGS(irq),
311
312	TP_STRUCT__entry(
313		__field(	unsigned int,	irq		)
314	),
315
316	TP_fast_assign(
317		__entry->irq		= irq;
318	),
319
320	TP_printk("irq %u", __entry->irq)
321);
322
323#define EXS(x) { x##_VECTOR, "#" #x }
324
325#define kvm_trace_sym_exc						\
326	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
327	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
328	EXS(MF), EXS(MC)
329
330/*
331 * Tracepoint for kvm exception injection:
332 */
333TRACE_EVENT(kvm_inj_exception,
334	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
335	TP_ARGS(exception, has_error, error_code),
336
337	TP_STRUCT__entry(
338		__field(	u8,	exception	)
339		__field(	u8,	has_error	)
340		__field(	u32,	error_code	)
341	),
342
343	TP_fast_assign(
344		__entry->exception	= exception;
345		__entry->has_error	= has_error;
346		__entry->error_code	= error_code;
347	),
348
349	TP_printk("%s (0x%x)",
350		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
351		  /* FIXME: don't print error_code if not present */
352		  __entry->has_error ? __entry->error_code : 0)
353);
354
355/*
356 * Tracepoint for page fault.
357 */
358TRACE_EVENT(kvm_page_fault,
359	TP_PROTO(unsigned long fault_address, unsigned int error_code),
360	TP_ARGS(fault_address, error_code),
361
362	TP_STRUCT__entry(
363		__field(	unsigned long,	fault_address	)
364		__field(	unsigned int,	error_code	)
365	),
366
367	TP_fast_assign(
368		__entry->fault_address	= fault_address;
369		__entry->error_code	= error_code;
370	),
371
372	TP_printk("address %lx error_code %x",
373		  __entry->fault_address, __entry->error_code)
374);
375
376/*
377 * Tracepoint for guest MSR access.
378 */
379TRACE_EVENT(kvm_msr,
380	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
381	TP_ARGS(write, ecx, data, exception),
382
383	TP_STRUCT__entry(
384		__field(	unsigned,	write		)
385		__field(	u32,		ecx		)
386		__field(	u64,		data		)
387		__field(	u8,		exception	)
388	),
389
390	TP_fast_assign(
391		__entry->write		= write;
392		__entry->ecx		= ecx;
393		__entry->data		= data;
394		__entry->exception	= exception;
395	),
396
397	TP_printk("msr_%s %x = 0x%llx%s",
398		  __entry->write ? "write" : "read",
399		  __entry->ecx, __entry->data,
400		  __entry->exception ? " (#GP)" : "")
401);
402
403#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
404#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
405#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
406#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
407
408/*
409 * Tracepoint for guest CR access.
410 */
411TRACE_EVENT(kvm_cr,
412	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
413	TP_ARGS(rw, cr, val),
414
415	TP_STRUCT__entry(
416		__field(	unsigned int,	rw		)
417		__field(	unsigned int,	cr		)
418		__field(	unsigned long,	val		)
419	),
420
421	TP_fast_assign(
422		__entry->rw		= rw;
423		__entry->cr		= cr;
424		__entry->val		= val;
425	),
426
427	TP_printk("cr_%s %x = 0x%lx",
428		  __entry->rw ? "write" : "read",
429		  __entry->cr, __entry->val)
430);
431
432#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
433#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
434
435TRACE_EVENT(kvm_pic_set_irq,
436	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
437	    TP_ARGS(chip, pin, elcr, imr, coalesced),
438
439	TP_STRUCT__entry(
440		__field(	__u8,		chip		)
441		__field(	__u8,		pin		)
442		__field(	__u8,		elcr		)
443		__field(	__u8,		imr		)
444		__field(	bool,		coalesced	)
445	),
446
447	TP_fast_assign(
448		__entry->chip		= chip;
449		__entry->pin		= pin;
450		__entry->elcr		= elcr;
451		__entry->imr		= imr;
452		__entry->coalesced	= coalesced;
453	),
454
455	TP_printk("chip %u pin %u (%s%s)%s",
456		  __entry->chip, __entry->pin,
457		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
458		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
459		  __entry->coalesced ? " (coalesced)" : "")
460);
461
462#define kvm_apic_dst_shorthand		\
463	{0x0, "dst"},			\
464	{0x1, "self"},			\
465	{0x2, "all"},			\
466	{0x3, "all-but-self"}
467
468TRACE_EVENT(kvm_apic_ipi,
469	    TP_PROTO(__u32 icr_low, __u32 dest_id),
470	    TP_ARGS(icr_low, dest_id),
471
472	TP_STRUCT__entry(
473		__field(	__u32,		icr_low		)
474		__field(	__u32,		dest_id		)
475	),
476
477	TP_fast_assign(
478		__entry->icr_low	= icr_low;
479		__entry->dest_id	= dest_id;
480	),
481
482	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
483		  __entry->dest_id, (u8)__entry->icr_low,
484		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
485				   kvm_deliver_mode),
486		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
487		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
488		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
489		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
490				   kvm_apic_dst_shorthand))
491);
492
493TRACE_EVENT(kvm_apic_accept_irq,
494	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
495	    TP_ARGS(apicid, dm, tm, vec, coalesced),
496
497	TP_STRUCT__entry(
498		__field(	__u32,		apicid		)
499		__field(	__u16,		dm		)
500		__field(	__u8,		tm		)
501		__field(	__u8,		vec		)
502		__field(	bool,		coalesced	)
503	),
504
505	TP_fast_assign(
506		__entry->apicid		= apicid;
507		__entry->dm		= dm;
508		__entry->tm		= tm;
509		__entry->vec		= vec;
510		__entry->coalesced	= coalesced;
511	),
512
513	TP_printk("apicid %x vec %u (%s|%s)%s",
514		  __entry->apicid, __entry->vec,
515		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
516		  __entry->tm ? "level" : "edge",
517		  __entry->coalesced ? " (coalesced)" : "")
518);
519
520/*
521 * Tracepoint for nested VMRUN
522 */
523TRACE_EVENT(kvm_nested_vmrun,
524	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
525		     __u32 event_inj, bool npt),
526	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
527
528	TP_STRUCT__entry(
529		__field(	__u64,		rip		)
530		__field(	__u64,		vmcb		)
531		__field(	__u64,		nested_rip	)
532		__field(	__u32,		int_ctl		)
533		__field(	__u32,		event_inj	)
534		__field(	bool,		npt		)
535	),
536
537	TP_fast_assign(
538		__entry->rip		= rip;
539		__entry->vmcb		= vmcb;
540		__entry->nested_rip	= nested_rip;
541		__entry->int_ctl	= int_ctl;
542		__entry->event_inj	= event_inj;
543		__entry->npt		= npt;
544	),
545
546	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
547		  "event_inj: 0x%08x npt: %s",
548		__entry->rip, __entry->vmcb, __entry->nested_rip,
549		__entry->int_ctl, __entry->event_inj,
550		__entry->npt ? "on" : "off")
551);
552
553TRACE_EVENT(kvm_nested_intercepts,
554	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
555	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
556
557	TP_STRUCT__entry(
558		__field(	__u16,		cr_read		)
559		__field(	__u16,		cr_write	)
560		__field(	__u32,		exceptions	)
561		__field(	__u64,		intercept	)
562	),
563
564	TP_fast_assign(
565		__entry->cr_read	= cr_read;
566		__entry->cr_write	= cr_write;
567		__entry->exceptions	= exceptions;
568		__entry->intercept	= intercept;
569	),
570
571	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
572		__entry->cr_read, __entry->cr_write, __entry->exceptions,
573		__entry->intercept)
574);
575/*
576 * Tracepoint for #VMEXIT while nested
577 */
578TRACE_EVENT(kvm_nested_vmexit,
579	    TP_PROTO(__u64 rip, __u32 exit_code,
580		     __u64 exit_info1, __u64 exit_info2,
581		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
582	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
583		    exit_int_info, exit_int_info_err, isa),
584
585	TP_STRUCT__entry(
586		__field(	__u64,		rip			)
587		__field(	__u32,		exit_code		)
588		__field(	__u64,		exit_info1		)
589		__field(	__u64,		exit_info2		)
590		__field(	__u32,		exit_int_info		)
591		__field(	__u32,		exit_int_info_err	)
592		__field(	__u32,		isa			)
593	),
594
595	TP_fast_assign(
596		__entry->rip			= rip;
597		__entry->exit_code		= exit_code;
598		__entry->exit_info1		= exit_info1;
599		__entry->exit_info2		= exit_info2;
600		__entry->exit_int_info		= exit_int_info;
601		__entry->exit_int_info_err	= exit_int_info_err;
602		__entry->isa			= isa;
603	),
604	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
605		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
606		  __entry->rip,
607		 (__entry->isa == KVM_ISA_VMX) ?
608		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
609		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
610		  __entry->exit_info1, __entry->exit_info2,
611		  __entry->exit_int_info, __entry->exit_int_info_err)
612);
613
614/*
615 * Tracepoint for #VMEXIT reinjected to the guest
616 */
617TRACE_EVENT(kvm_nested_vmexit_inject,
618	    TP_PROTO(__u32 exit_code,
619		     __u64 exit_info1, __u64 exit_info2,
620		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
621	    TP_ARGS(exit_code, exit_info1, exit_info2,
622		    exit_int_info, exit_int_info_err, isa),
623
624	TP_STRUCT__entry(
625		__field(	__u32,		exit_code		)
626		__field(	__u64,		exit_info1		)
627		__field(	__u64,		exit_info2		)
628		__field(	__u32,		exit_int_info		)
629		__field(	__u32,		exit_int_info_err	)
630		__field(	__u32,		isa			)
631	),
632
633	TP_fast_assign(
634		__entry->exit_code		= exit_code;
635		__entry->exit_info1		= exit_info1;
636		__entry->exit_info2		= exit_info2;
637		__entry->exit_int_info		= exit_int_info;
638		__entry->exit_int_info_err	= exit_int_info_err;
639		__entry->isa			= isa;
640	),
641
642	TP_printk("reason: %s ext_inf1: 0x%016llx "
643		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
644		 (__entry->isa == KVM_ISA_VMX) ?
645		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
646		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
647		__entry->exit_info1, __entry->exit_info2,
648		__entry->exit_int_info, __entry->exit_int_info_err)
649);
650
651/*
652 * Tracepoint for nested #vmexit because of interrupt pending
653 */
654TRACE_EVENT(kvm_nested_intr_vmexit,
655	    TP_PROTO(__u64 rip),
656	    TP_ARGS(rip),
657
658	TP_STRUCT__entry(
659		__field(	__u64,	rip	)
660	),
661
662	TP_fast_assign(
663		__entry->rip	=	rip;
664	),
665
666	TP_printk("rip: 0x%016llx", __entry->rip)
667);
668
669/*
670 * Tracepoint for the INVLPGA instruction
671 */
672TRACE_EVENT(kvm_invlpga,
673	    TP_PROTO(__u64 rip, int asid, u64 address),
674	    TP_ARGS(rip, asid, address),
675
676	TP_STRUCT__entry(
677		__field(	__u64,	rip	)
678		__field(	int,	asid	)
679		__field(	__u64,	address	)
680	),
681
682	TP_fast_assign(
683		__entry->rip		=	rip;
684		__entry->asid		=	asid;
685		__entry->address	=	address;
686	),
687
688	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
689		  __entry->rip, __entry->asid, __entry->address)
690);
691
692/*
693 * Tracepoint for the SKINIT instruction
694 */
695TRACE_EVENT(kvm_skinit,
696	    TP_PROTO(__u64 rip, __u32 slb),
697	    TP_ARGS(rip, slb),
698
699	TP_STRUCT__entry(
700		__field(	__u64,	rip	)
701		__field(	__u32,	slb	)
702	),
703
704	TP_fast_assign(
705		__entry->rip		=	rip;
706		__entry->slb		=	slb;
707	),
708
709	TP_printk("rip: 0x%016llx slb: 0x%08x",
710		  __entry->rip, __entry->slb)
711);
712
713#define __print_insn(insn, ilen) ({		                 \
714	int i;							 \
715	const char *ret = p->buffer + p->len;			 \
716								 \
717	for (i = 0; i < ilen; ++i)				 \
718		trace_seq_printf(p, " %02x", insn[i]);		 \
719	trace_seq_printf(p, "%c", 0);				 \
720	ret;							 \
721	})
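
This open-coded __print_insn() helper dumps the instruction bytes one by one through trace_seq_printf(); the v3.15 copy above drops it and uses the generic __print_hex() in kvm_emulate_insn's TP_printk() instead.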
722
723#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
724#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
725#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
726#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
727
728#define kvm_trace_symbol_emul_flags	                  \
729	{ 0,   			    "real" },		  \
730	{ KVM_EMUL_INSN_F_CR0_PE			  \
731	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
732	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
733	{ KVM_EMUL_INSN_F_CR0_PE			  \
734	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
735	{ KVM_EMUL_INSN_F_CR0_PE			  \
736	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
737
738#define kei_decode_mode(mode) ({			\
739	u8 flags = 0xff;				\
740	switch (mode) {					\
741	case X86EMUL_MODE_REAL:				\
742		flags = 0;				\
743		break;					\
744	case X86EMUL_MODE_VM86:				\
745		flags = KVM_EMUL_INSN_F_EFL_VM;		\
746		break;					\
747	case X86EMUL_MODE_PROT16:			\
748		flags = KVM_EMUL_INSN_F_CR0_PE;		\
749		break;					\
750	case X86EMUL_MODE_PROT32:			\
751		flags = KVM_EMUL_INSN_F_CR0_PE		\
752			| KVM_EMUL_INSN_F_CS_D;		\
753		break;					\
754	case X86EMUL_MODE_PROT64:			\
755		flags = KVM_EMUL_INSN_F_CR0_PE		\
756			| KVM_EMUL_INSN_F_CS_L;		\
757		break;					\
758	}						\
759	flags;						\
760	})
761
762TRACE_EVENT(kvm_emulate_insn,
763	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
764	TP_ARGS(vcpu, failed),
765
766	TP_STRUCT__entry(
767		__field(    __u64, rip                       )
768		__field(    __u32, csbase                    )
769		__field(    __u8,  len                       )
770		__array(    __u8,  insn,    15	             )
771		__field(    __u8,  flags       	   	     )
772		__field(    __u8,  failed                    )
773		),
774
775	TP_fast_assign(
776		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
777		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
778		__entry->len = vcpu->arch.emulate_ctxt._eip
779			       - vcpu->arch.emulate_ctxt.fetch.start;
780		memcpy(__entry->insn,
781		       vcpu->arch.emulate_ctxt.fetch.data,
782		       15);
783		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
784		__entry->failed = failed;
785		),
786
787	TP_printk("%x:%llx:%s (%s)%s",
788		  __entry->csbase, __entry->rip,
789		  __print_insn(__entry->insn, __entry->len),
790		  __print_symbolic(__entry->flags,
791				   kvm_trace_symbol_emul_flags),
792		  __entry->failed ? " failed" : ""
793		)
794	);
795
796#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
797#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
798
799TRACE_EVENT(
800	vcpu_match_mmio,
801	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
802	TP_ARGS(gva, gpa, write, gpa_match),
803
804	TP_STRUCT__entry(
805		__field(gva_t, gva)
806		__field(gpa_t, gpa)
807		__field(bool, write)
808		__field(bool, gpa_match)
809		),
810
811	TP_fast_assign(
812		__entry->gva = gva;
813		__entry->gpa = gpa;
814		__entry->write = write;
815		__entry->gpa_match = gpa_match;
816		),
817
818	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
819		  __entry->write ? "Write" : "Read",
820		  __entry->gpa_match ? "GPA" : "GVA")
821);
822#endif /* _TRACE_KVM_H */
823
824#undef TRACE_INCLUDE_PATH
825#define TRACE_INCLUDE_PATH arch/x86/kvm
826#undef TRACE_INCLUDE_FILE
827#define TRACE_INCLUDE_FILE trace
828
829/* This part must be outside protection */
830#include <trace/define_trace.h>