   1/* SPDX-License-Identifier: GPL-2.0 */
   2#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
   3#define _TRACE_KVM_H
   4
   5#include <linux/tracepoint.h>
   6#include <asm/vmx.h>
   7#include <asm/svm.h>
   8#include <asm/clocksource.h>
   9#include <asm/pvclock-abi.h>
  10
  11#undef TRACE_SYSTEM
  12#define TRACE_SYSTEM kvm
  13
  14/*
  15 * Tracepoint for guest mode entry.
  16 */
  17TRACE_EVENT(kvm_entry,
  18	TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
  19	TP_ARGS(vcpu, force_immediate_exit),
  20
  21	TP_STRUCT__entry(
  22		__field(	unsigned int,	vcpu_id		)
  23		__field(	unsigned long,	rip		)
  24		__field(	bool,		immediate_exit	)
  25	),
  26
  27	TP_fast_assign(
  28		__entry->vcpu_id        = vcpu->vcpu_id;
  29		__entry->rip		= kvm_rip_read(vcpu);
  30		__entry->immediate_exit	= force_immediate_exit;
  31	),
  32
  33	TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
  34		  __entry->immediate_exit ? "[immediate exit]" : "")
  35);
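/*
 * Illustrative sketch (not part of the upstream header): TRACE_EVENT()
 * generates a trace_kvm_entry() helper matching TP_PROTO above, which the
 * vendor vcpu-run path would be expected to call roughly like this
 * (the surrounding variables are assumptions for the example):
 *
 *	trace_kvm_entry(vcpu, force_immediate_exit);
 */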
  36
  37/*
  38 * Tracepoint for hypercall.
  39 */
  40TRACE_EVENT(kvm_hypercall,
  41	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
  42		 unsigned long a2, unsigned long a3),
  43	TP_ARGS(nr, a0, a1, a2, a3),
  44
  45	TP_STRUCT__entry(
  46		__field(	unsigned long, 	nr		)
  47		__field(	unsigned long,	a0		)
  48		__field(	unsigned long,	a1		)
  49		__field(	unsigned long,	a2		)
  50		__field(	unsigned long,	a3		)
  51	),
  52
  53	TP_fast_assign(
  54		__entry->nr		= nr;
  55		__entry->a0		= a0;
  56		__entry->a1		= a1;
  57		__entry->a2		= a2;
  58		__entry->a3		= a3;
  59	),
  60
  61	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
  62		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
  63		 __entry->a3)
  64);
  65
  66/*
   67 * Tracepoint for Hyper-V hypercall.
  68 */
  69TRACE_EVENT(kvm_hv_hypercall,
  70	TP_PROTO(__u16 code, bool fast,  __u16 var_cnt, __u16 rep_cnt,
  71		 __u16 rep_idx, __u64 ingpa, __u64 outgpa),
  72	TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa),
  73
  74	TP_STRUCT__entry(
  75		__field(	__u16,		rep_cnt		)
  76		__field(	__u16,		rep_idx		)
  77		__field(	__u64,		ingpa		)
  78		__field(	__u64,		outgpa		)
  79		__field(	__u16, 		code		)
  80		__field(	__u16,		var_cnt		)
  81		__field(	bool,		fast		)
  82	),
  83
  84	TP_fast_assign(
  85		__entry->rep_cnt	= rep_cnt;
  86		__entry->rep_idx	= rep_idx;
  87		__entry->ingpa		= ingpa;
  88		__entry->outgpa		= outgpa;
  89		__entry->code		= code;
  90		__entry->var_cnt	= var_cnt;
  91		__entry->fast		= fast;
  92	),
  93
  94	TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
  95		  __entry->code, __entry->fast ? "fast" : "slow",
  96		  __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx,
  97		  __entry->ingpa, __entry->outgpa)
  98);
  99
 100TRACE_EVENT(kvm_hv_hypercall_done,
 101	TP_PROTO(u64 result),
 102	TP_ARGS(result),
 103
 104	TP_STRUCT__entry(
 105		__field(__u64, result)
 106	),
 107
 108	TP_fast_assign(
 109		__entry->result	= result;
 110	),
 111
 112	TP_printk("result 0x%llx", __entry->result)
 113);
 114
 115/*
 116 * Tracepoint for Xen hypercall.
 117 */
 118TRACE_EVENT(kvm_xen_hypercall,
 119	    TP_PROTO(u8 cpl, unsigned long nr,
 120		     unsigned long a0, unsigned long a1, unsigned long a2,
 121		     unsigned long a3, unsigned long a4, unsigned long a5),
 122	    TP_ARGS(cpl, nr, a0, a1, a2, a3, a4, a5),
 123
 124	TP_STRUCT__entry(
 125		__field(u8, cpl)
 126		__field(unsigned long, nr)
 127		__field(unsigned long, a0)
 128		__field(unsigned long, a1)
 129		__field(unsigned long, a2)
 130		__field(unsigned long, a3)
 131		__field(unsigned long, a4)
 132		__field(unsigned long, a5)
 133	),
 134
 135	TP_fast_assign(
 136		__entry->cpl = cpl;
 137		__entry->nr = nr;
 138		__entry->a0 = a0;
 139		__entry->a1 = a1;
 140		__entry->a2 = a2;
 141		__entry->a3 = a3;
 142		__entry->a4 = a4;
  143		__entry->a5 = a5;
 144	),
 145
  146	TP_printk("cpl %d nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 0x%lx",
 147		  __entry->cpl, __entry->nr,
 148		  __entry->a0, __entry->a1, __entry->a2,
 149		  __entry->a3, __entry->a4, __entry->a5)
 150);
 151
 152
 153
 154/*
 155 * Tracepoint for PIO.
 156 */
 157
 158#define KVM_PIO_IN   0
 159#define KVM_PIO_OUT  1
 160
 161TRACE_EVENT(kvm_pio,
 162	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 163		 unsigned int count, const void *data),
 164	TP_ARGS(rw, port, size, count, data),
 165
 166	TP_STRUCT__entry(
 167		__field(	unsigned int, 	rw		)
 168		__field(	unsigned int, 	port		)
 169		__field(	unsigned int, 	size		)
 170		__field(	unsigned int,	count		)
 171		__field(	unsigned int,	val		)
 172	),
 173
 174	TP_fast_assign(
 175		__entry->rw		= rw;
 176		__entry->port		= port;
 177		__entry->size		= size;
 178		__entry->count		= count;
 179		if (size == 1)
 180			__entry->val	= *(unsigned char *)data;
 181		else if (size == 2)
 182			__entry->val	= *(unsigned short *)data;
 183		else
 184			__entry->val	= *(unsigned int *)data;
 185	),
 186
 187	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
 188		  __entry->rw ? "write" : "read",
 189		  __entry->port, __entry->size, __entry->count, __entry->val,
 190		  __entry->count > 1 ? "(...)" : "")
 191);
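/*
 * Illustrative sketch: PIO emulation would emit this event with one of
 * the KVM_PIO_* directions defined above; port, size, count and data are
 * placeholder names for the example:
 *
 *	trace_kvm_pio(KVM_PIO_OUT, port, size, count, data);
 */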
 192
 193/*
 194 * Tracepoint for fast mmio.
 195 */
 196TRACE_EVENT(kvm_fast_mmio,
 197	TP_PROTO(u64 gpa),
 198	TP_ARGS(gpa),
 199
 200	TP_STRUCT__entry(
 201		__field(u64,	gpa)
 202	),
 203
 204	TP_fast_assign(
 205		__entry->gpa		= gpa;
 206	),
 207
 208	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
 209);
 210
 211/*
 212 * Tracepoint for cpuid.
 213 */
 214TRACE_EVENT(kvm_cpuid,
 215	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
 216		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
 217		 bool found, bool used_max_basic),
 218	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),
 219
 220	TP_STRUCT__entry(
 221		__field(	unsigned int,	function	)
 222		__field(	unsigned int,	index		)
 223		__field(	unsigned long,	rax		)
 224		__field(	unsigned long,	rbx		)
 225		__field(	unsigned long,	rcx		)
 226		__field(	unsigned long,	rdx		)
 227		__field(	bool,		found		)
 228		__field(	bool,		used_max_basic	)
 229	),
 230
 231	TP_fast_assign(
 232		__entry->function	= function;
 233		__entry->index		= index;
 234		__entry->rax		= rax;
 235		__entry->rbx		= rbx;
 236		__entry->rcx		= rcx;
 237		__entry->rdx		= rdx;
 238		__entry->found		= found;
 239		__entry->used_max_basic	= used_max_basic;
 240	),
 241
 242	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
 243		  __entry->function, __entry->index, __entry->rax,
 244		  __entry->rbx, __entry->rcx, __entry->rdx,
 245		  __entry->found ? "found" : "not found",
 246		  __entry->used_max_basic ? ", used max basic" : "")
 247);
 248
 249#define AREG(x) { APIC_##x, "APIC_" #x }
 250
 251#define kvm_trace_symbol_apic						    \
 252	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
 253	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
 254	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
 255	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
 256	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
 257	AREG(ECTRL)
 258/*
 259 * Tracepoint for apic access.
 260 */
 261TRACE_EVENT(kvm_apic,
 262	TP_PROTO(unsigned int rw, unsigned int reg, u64 val),
 263	TP_ARGS(rw, reg, val),
 264
 265	TP_STRUCT__entry(
 266		__field(	unsigned int,	rw		)
 267		__field(	unsigned int,	reg		)
 268		__field(	u64,		val		)
 269	),
 270
 271	TP_fast_assign(
 272		__entry->rw		= rw;
 273		__entry->reg		= reg;
 274		__entry->val		= val;
 275	),
 276
 277	TP_printk("apic_%s %s = 0x%llx",
 278		  __entry->rw ? "write" : "read",
 279		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
 280		  __entry->val)
 281);
 282
 283#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 284#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
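/*
 * Illustrative sketch: the two wrappers above pin the rw argument of the
 * kvm_apic event, so a register access in the local APIC emulation might
 * be traced as (val is a placeholder):
 *
 *	trace_kvm_apic_read(APIC_TMCCT, val);
 *	trace_kvm_apic_write(APIC_ICR, val);
 */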
 285
 286#define KVM_ISA_VMX   1
 287#define KVM_ISA_SVM   2
 288
 289#define kvm_print_exit_reason(exit_reason, isa)				\
 290	(isa == KVM_ISA_VMX) ?						\
 291	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
 292	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
 293	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
 294	(isa == KVM_ISA_VMX) ?						\
 295	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""
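/*
 * Note: kvm_print_exit_reason() deliberately expands to three
 * comma-separated printk arguments (symbolic reason, optional separator,
 * VMX-only exit-reason flags), so it must be paired with a "%s%s%s"
 * conversion, as done in TRACE_EVENT_KVM_EXIT() and
 * kvm_nested_vmexit_inject below. A minimal sketch of that pairing:
 *
 *	TP_printk("reason %s%s%s",
 *		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa))
 */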
 296
 297#define TRACE_EVENT_KVM_EXIT(name)					     \
 298TRACE_EVENT(name,							     \
 299	TP_PROTO(struct kvm_vcpu *vcpu, u32 isa),			     \
 300	TP_ARGS(vcpu, isa),						     \
 301									     \
 302	TP_STRUCT__entry(						     \
 303		__field(	unsigned int,	exit_reason	)	     \
 304		__field(	unsigned long,	guest_rip	)	     \
 305		__field(	u32,	        isa             )	     \
 306		__field(	u64,	        info1           )	     \
 307		__field(	u64,	        info2           )	     \
 308		__field(	u32,	        intr_info	)	     \
 309		__field(	u32,	        error_code	)	     \
 310		__field(	unsigned int,	vcpu_id         )	     \
 311	),								     \
 312									     \
 313	TP_fast_assign(							     \
 314		__entry->guest_rip	= kvm_rip_read(vcpu);		     \
 315		__entry->isa            = isa;				     \
 316		__entry->vcpu_id        = vcpu->vcpu_id;		     \
 317		kvm_x86_call(get_exit_info)(vcpu,			     \
 318					    &__entry->exit_reason,	     \
 319					    &__entry->info1,		     \
 320					    &__entry->info2,		     \
 321					    &__entry->intr_info,	     \
 322					    &__entry->error_code);	     \
 323	),								     \
 324									     \
 325	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
 326		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x",	     \
 327		  __entry->vcpu_id,					     \
 328		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
 329		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
 330		  __entry->intr_info, __entry->error_code)		     \
 331)
 332
 333/*
 334 * Tracepoint for kvm guest exit:
 335 */
 336TRACE_EVENT_KVM_EXIT(kvm_exit);
 337
 338/*
 339 * Tracepoint for kvm interrupt injection:
 340 */
 341TRACE_EVENT(kvm_inj_virq,
 342	TP_PROTO(unsigned int vector, bool soft, bool reinjected),
 343	TP_ARGS(vector, soft, reinjected),
 344
 345	TP_STRUCT__entry(
 346		__field(	unsigned int,	vector		)
 347		__field(	bool,		soft		)
 348		__field(	bool,		reinjected	)
 349	),
 350
 351	TP_fast_assign(
 352		__entry->vector		= vector;
 353		__entry->soft		= soft;
 354		__entry->reinjected	= reinjected;
 355	),
 356
 357	TP_printk("%s 0x%x%s",
 358		  __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector,
 359		  __entry->reinjected ? " [reinjected]" : "")
 360);
 361
 362#define EXS(x) { x##_VECTOR, "#" #x }
 363
 364#define kvm_trace_sym_exc						\
 365	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
 366	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
 367	EXS(MF), EXS(AC), EXS(MC)
 368
 369/*
 370 * Tracepoint for kvm interrupt injection:
 371 */
 372TRACE_EVENT(kvm_inj_exception,
 373	TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
 374		 bool reinjected),
 375	TP_ARGS(exception, has_error, error_code, reinjected),
 376
 377	TP_STRUCT__entry(
 378		__field(	u8,	exception	)
 379		__field(	u8,	has_error	)
 380		__field(	u32,	error_code	)
 381		__field(	bool,	reinjected	)
 382	),
 383
 384	TP_fast_assign(
 385		__entry->exception	= exception;
 386		__entry->has_error	= has_error;
 387		__entry->error_code	= error_code;
 388		__entry->reinjected	= reinjected;
 389	),
 390
 391	TP_printk("%s%s%s%s%s",
 392		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
 393		  !__entry->has_error ? "" : " (",
 394		  !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }),
 395		  !__entry->has_error ? "" : ")",
 396		  __entry->reinjected ? " [reinjected]" : "")
 397);
 398
 399/*
 400 * Tracepoint for page fault.
 401 */
 402TRACE_EVENT(kvm_page_fault,
 403	TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code),
 404	TP_ARGS(vcpu, fault_address, error_code),
 405
 406	TP_STRUCT__entry(
 407		__field(	unsigned int,	vcpu_id		)
 408		__field(	unsigned long,	guest_rip	)
 409		__field(	u64,		fault_address	)
 410		__field(	u64,		error_code	)
 411	),
 412
 413	TP_fast_assign(
 414		__entry->vcpu_id	= vcpu->vcpu_id;
 415		__entry->guest_rip	= kvm_rip_read(vcpu);
 416		__entry->fault_address	= fault_address;
 417		__entry->error_code	= error_code;
 418	),
 419
 420	TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx",
 421		  __entry->vcpu_id, __entry->guest_rip,
 422		  __entry->fault_address, __entry->error_code)
 423);
 424
 425/*
 426 * Tracepoint for guest MSR access.
 427 */
 428TRACE_EVENT(kvm_msr,
 429	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
 430	TP_ARGS(write, ecx, data, exception),
 431
 432	TP_STRUCT__entry(
 433		__field(	unsigned,	write		)
 434		__field(	u32,		ecx		)
 435		__field(	u64,		data		)
 436		__field(	u8,		exception	)
 437	),
 438
 439	TP_fast_assign(
 440		__entry->write		= write;
 441		__entry->ecx		= ecx;
 442		__entry->data		= data;
 443		__entry->exception	= exception;
 444	),
 445
 446	TP_printk("msr_%s %x = 0x%llx%s",
 447		  __entry->write ? "write" : "read",
 448		  __entry->ecx, __entry->data,
 449		  __entry->exception ? " (#GP)" : "")
 450);
 451
 452#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
 453#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
 454#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
 455#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
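/*
 * Illustrative sketch: the wrappers above encode the access direction and
 * whether the access raised #GP; ecx and data are placeholders:
 *
 *	trace_kvm_msr_read(ecx, data);
 *	trace_kvm_msr_write(ecx, data);
 *	trace_kvm_msr_read_ex(ecx);
 */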
 456
 457/*
 458 * Tracepoint for guest CR access.
 459 */
 460TRACE_EVENT(kvm_cr,
 461	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
 462	TP_ARGS(rw, cr, val),
 463
 464	TP_STRUCT__entry(
 465		__field(	unsigned int,	rw		)
 466		__field(	unsigned int,	cr		)
 467		__field(	unsigned long,	val		)
 468	),
 469
 470	TP_fast_assign(
 471		__entry->rw		= rw;
 472		__entry->cr		= cr;
 473		__entry->val		= val;
 474	),
 475
 476	TP_printk("cr_%s %x = 0x%lx",
 477		  __entry->rw ? "write" : "read",
 478		  __entry->cr, __entry->val)
 479);
 480
 481#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
 482#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
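/*
 * Illustrative sketch (placeholder value): a CR4 write emulation could be
 * traced via the wrapper above as
 *
 *	trace_kvm_cr_write(4, val);
 */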
 483
 484TRACE_EVENT(kvm_pic_set_irq,
 485	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
 486	    TP_ARGS(chip, pin, elcr, imr, coalesced),
 487
 488	TP_STRUCT__entry(
 489		__field(	__u8,		chip		)
 490		__field(	__u8,		pin		)
 491		__field(	__u8,		elcr		)
 492		__field(	__u8,		imr		)
 493		__field(	bool,		coalesced	)
 494	),
 495
 496	TP_fast_assign(
 497		__entry->chip		= chip;
 498		__entry->pin		= pin;
 499		__entry->elcr		= elcr;
 500		__entry->imr		= imr;
 501		__entry->coalesced	= coalesced;
 502	),
 503
 504	TP_printk("chip %u pin %u (%s%s)%s",
 505		  __entry->chip, __entry->pin,
 506		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
 507		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
 508		  __entry->coalesced ? " (coalesced)" : "")
 509);
 510
 511#define kvm_apic_dst_shorthand		\
 512	{0x0, "dst"},			\
 513	{0x1, "self"},			\
 514	{0x2, "all"},			\
 515	{0x3, "all-but-self"}
 516
 517TRACE_EVENT(kvm_apic_ipi,
 518	    TP_PROTO(__u32 icr_low, __u32 dest_id),
 519	    TP_ARGS(icr_low, dest_id),
 520
 521	TP_STRUCT__entry(
 522		__field(	__u32,		icr_low		)
 523		__field(	__u32,		dest_id		)
 524	),
 525
 526	TP_fast_assign(
 527		__entry->icr_low	= icr_low;
 528		__entry->dest_id	= dest_id;
 529	),
 530
 531	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
 532		  __entry->dest_id, (u8)__entry->icr_low,
 533		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
 534				   kvm_deliver_mode),
 535		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
 536		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
 537		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
 538		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
 539				   kvm_apic_dst_shorthand))
 540);
 541
 542TRACE_EVENT(kvm_apic_accept_irq,
 543	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 544	    TP_ARGS(apicid, dm, tm, vec),
 545
 546	TP_STRUCT__entry(
 547		__field(	__u32,		apicid		)
 548		__field(	__u16,		dm		)
 549		__field(	__u16,		tm		)
 550		__field(	__u8,		vec		)
 551	),
 552
 553	TP_fast_assign(
 554		__entry->apicid		= apicid;
 555		__entry->dm		= dm;
 556		__entry->tm		= tm;
 557		__entry->vec		= vec;
 558	),
 559
 560	TP_printk("apicid %x vec %u (%s|%s)",
 561		  __entry->apicid, __entry->vec,
 562		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
 563		  __entry->tm ? "level" : "edge")
 564);
 565
 566TRACE_EVENT(kvm_eoi,
 567	    TP_PROTO(struct kvm_lapic *apic, int vector),
 568	    TP_ARGS(apic, vector),
 569
 570	TP_STRUCT__entry(
 571		__field(	__u32,		apicid		)
 572		__field(	int,		vector		)
 573	),
 574
 575	TP_fast_assign(
 576		__entry->apicid		= apic->vcpu->vcpu_id;
 577		__entry->vector		= vector;
 578	),
 579
 580	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 581);
 582
 583TRACE_EVENT(kvm_pv_eoi,
 584	    TP_PROTO(struct kvm_lapic *apic, int vector),
 585	    TP_ARGS(apic, vector),
 586
 587	TP_STRUCT__entry(
 588		__field(	__u32,		apicid		)
 589		__field(	int,		vector		)
 590	),
 591
 592	TP_fast_assign(
 593		__entry->apicid		= apic->vcpu->vcpu_id;
 594		__entry->vector		= vector;
 595	),
 596
 597	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 598);
 599
 600/*
  601 * Tracepoint for nested VM-Enter (SVM VMRUN, VMX VMLAUNCH/VMRESUME)
 602 */
 603TRACE_EVENT(kvm_nested_vmenter,
 604	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
 605		     __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd,
 606		     __u64 guest_cr3, __u32 isa),
 607	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
 608		    guest_tdp_pgd, guest_cr3, isa),
 609
 610	TP_STRUCT__entry(
 611		__field(	__u64,		rip		)
 612		__field(	__u64,		vmcb		)
 613		__field(	__u64,		nested_rip	)
 614		__field(	__u32,		int_ctl		)
 615		__field(	__u32,		event_inj	)
 616		__field(	bool,		tdp_enabled	)
 617		__field(	__u64,		guest_pgd	)
 618		__field(	__u32,		isa		)
 619	),
 620
 621	TP_fast_assign(
 622		__entry->rip		= rip;
 623		__entry->vmcb		= vmcb;
 624		__entry->nested_rip	= nested_rip;
 625		__entry->int_ctl	= int_ctl;
 626		__entry->event_inj	= event_inj;
 627		__entry->tdp_enabled	= tdp_enabled;
 628		__entry->guest_pgd	= tdp_enabled ? guest_tdp_pgd : guest_cr3;
 629		__entry->isa		= isa;
 630	),
 631
 632	TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
 633		  "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx",
 634		  __entry->rip,
 635		  __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
 636		  __entry->vmcb,
 637		  __entry->nested_rip,
 638		  __entry->int_ctl,
 639		  __entry->event_inj,
 640		  __entry->isa == KVM_ISA_VMX ? "ept" : "npt",
 641		  __entry->tdp_enabled ? "y" : "n",
 642		  !__entry->tdp_enabled ? "guest_cr3" :
 643		  __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3",
 644		  __entry->guest_pgd)
 645);
 646
 647TRACE_EVENT(kvm_nested_intercepts,
 648	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
 649		     __u32 intercept1, __u32 intercept2, __u32 intercept3),
 650	    TP_ARGS(cr_read, cr_write, exceptions, intercept1,
 651		    intercept2, intercept3),
 652
 653	TP_STRUCT__entry(
 654		__field(	__u16,		cr_read		)
 655		__field(	__u16,		cr_write	)
 656		__field(	__u32,		exceptions	)
 657		__field(	__u32,		intercept1	)
 658		__field(	__u32,		intercept2	)
 659		__field(	__u32,		intercept3	)
 660	),
 661
 662	TP_fast_assign(
 663		__entry->cr_read	= cr_read;
 664		__entry->cr_write	= cr_write;
 665		__entry->exceptions	= exceptions;
 666		__entry->intercept1	= intercept1;
 667		__entry->intercept2	= intercept2;
 668		__entry->intercept3	= intercept3;
 669	),
 670
 671	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
 672		  "intercepts: %08x %08x %08x",
 673		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
 674		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
 675);
 676/*
 677 * Tracepoint for #VMEXIT while nested
 678 */
 679TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);
 680
 681/*
 682 * Tracepoint for #VMEXIT reinjected to the guest
 683 */
 684TRACE_EVENT(kvm_nested_vmexit_inject,
 685	    TP_PROTO(__u32 exit_code,
 686		     __u64 exit_info1, __u64 exit_info2,
 687		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
 688	    TP_ARGS(exit_code, exit_info1, exit_info2,
 689		    exit_int_info, exit_int_info_err, isa),
 690
 691	TP_STRUCT__entry(
 692		__field(	__u32,		exit_code		)
 693		__field(	__u64,		exit_info1		)
 694		__field(	__u64,		exit_info2		)
 695		__field(	__u32,		exit_int_info		)
 696		__field(	__u32,		exit_int_info_err	)
 697		__field(	__u32,		isa			)
 698	),
 699
 700	TP_fast_assign(
 701		__entry->exit_code		= exit_code;
 702		__entry->exit_info1		= exit_info1;
 703		__entry->exit_info2		= exit_info2;
 704		__entry->exit_int_info		= exit_int_info;
 705		__entry->exit_int_info_err	= exit_int_info_err;
 706		__entry->isa			= isa;
 707	),
 708
 709	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
 710		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
 711		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
 712		  __entry->exit_info1, __entry->exit_info2,
 713		  __entry->exit_int_info, __entry->exit_int_info_err)
 714);
 715
 716/*
 717 * Tracepoint for nested #vmexit because of interrupt pending
 718 */
 719TRACE_EVENT(kvm_nested_intr_vmexit,
 720	    TP_PROTO(__u64 rip),
 721	    TP_ARGS(rip),
 722
 723	TP_STRUCT__entry(
 724		__field(	__u64,	rip	)
 725	),
 726
 727	TP_fast_assign(
  728		__entry->rip	=	rip;
 729	),
 730
 731	TP_printk("rip: 0x%016llx", __entry->rip)
 732);
 733
 734/*
  735 * Tracepoint for the INVLPGA instruction
 736 */
 737TRACE_EVENT(kvm_invlpga,
 738	    TP_PROTO(__u64 rip, unsigned int asid, u64 address),
 739	    TP_ARGS(rip, asid, address),
 740
 741	TP_STRUCT__entry(
 742		__field(	__u64,		rip	)
 743		__field(	unsigned int,	asid	)
 744		__field(	__u64,		address	)
 745	),
 746
 747	TP_fast_assign(
 748		__entry->rip		=	rip;
 749		__entry->asid		=	asid;
 750		__entry->address	=	address;
 751	),
 752
 753	TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
 754		  __entry->rip, __entry->asid, __entry->address)
 755);
 756
 757/*
  758 * Tracepoint for the SKINIT instruction
 759 */
 760TRACE_EVENT(kvm_skinit,
 761	    TP_PROTO(__u64 rip, __u32 slb),
 762	    TP_ARGS(rip, slb),
 763
 764	TP_STRUCT__entry(
 765		__field(	__u64,	rip	)
 766		__field(	__u32,	slb	)
 767	),
 768
 769	TP_fast_assign(
 770		__entry->rip		=	rip;
 771		__entry->slb		=	slb;
 772	),
 773
 774	TP_printk("rip: 0x%016llx slb: 0x%08x",
 775		  __entry->rip, __entry->slb)
 776);
 777
 778#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
 779#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
 780#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
 781#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
 782
 783#define kvm_trace_symbol_emul_flags	                  \
 784	{ 0,   			    "real" },		  \
 785	{ KVM_EMUL_INSN_F_CR0_PE			  \
 786	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
 787	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
 788	{ KVM_EMUL_INSN_F_CR0_PE			  \
 789	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
 790	{ KVM_EMUL_INSN_F_CR0_PE			  \
 791	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
 792
 793#define kei_decode_mode(mode) ({			\
 794	u8 flags = 0xff;				\
 795	switch (mode) {					\
 796	case X86EMUL_MODE_REAL:				\
 797		flags = 0;				\
 798		break;					\
 799	case X86EMUL_MODE_VM86:				\
 800		flags = KVM_EMUL_INSN_F_EFL_VM;		\
 801		break;					\
 802	case X86EMUL_MODE_PROT16:			\
 803		flags = KVM_EMUL_INSN_F_CR0_PE;		\
 804		break;					\
 805	case X86EMUL_MODE_PROT32:			\
 806		flags = KVM_EMUL_INSN_F_CR0_PE		\
 807			| KVM_EMUL_INSN_F_CS_D;		\
 808		break;					\
 809	case X86EMUL_MODE_PROT64:			\
 810		flags = KVM_EMUL_INSN_F_CR0_PE		\
 811			| KVM_EMUL_INSN_F_CS_L;		\
 812		break;					\
 813	}						\
 814	flags;						\
 815	})
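/*
 * Illustrative sketch: kei_decode_mode() maps an emulator mode onto the
 * flag combinations listed in kvm_trace_symbol_emul_flags, e.g.
 *
 *	kei_decode_mode(X86EMUL_MODE_PROT64)
 *		== (KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L)
 *
 * which __print_symbolic() then renders as "prot64".
 */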
 816
 817TRACE_EVENT(kvm_emulate_insn,
 818	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
 819	TP_ARGS(vcpu, failed),
 820
 821	TP_STRUCT__entry(
 822		__field(    __u64, rip                       )
 823		__field(    __u32, csbase                    )
 824		__field(    __u8,  len                       )
 825		__array(    __u8,  insn,    15	             )
 826		__field(    __u8,  flags       	   	     )
 827		__field(    __u8,  failed                    )
 828		),
 829
 830	TP_fast_assign(
 831		__entry->csbase = kvm_x86_call(get_segment_base)(vcpu,
 832								 VCPU_SREG_CS);
 833		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
 834			       - vcpu->arch.emulate_ctxt->fetch.data;
 835		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
 836		memcpy(__entry->insn,
 837		       vcpu->arch.emulate_ctxt->fetch.data,
 838		       15);
 839		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
 840		__entry->failed = failed;
 841		),
 842
 843	TP_printk("%x:%llx:%s (%s)%s",
 844		  __entry->csbase, __entry->rip,
 845		  __print_hex(__entry->insn, __entry->len),
 846		  __print_symbolic(__entry->flags,
 847				   kvm_trace_symbol_emul_flags),
 848		  __entry->failed ? " failed" : ""
 849		)
 850	);
 851
 852#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
 853#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
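/*
 * Illustrative sketch: the emulator would be expected to mark the start
 * of instruction emulation and any decode/execution failure through the
 * two wrappers above, roughly:
 *
 *	trace_kvm_emulate_insn_start(vcpu);
 *	...
 *	trace_kvm_emulate_insn_failed(vcpu);
 */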
 854
 855TRACE_EVENT(
 856	vcpu_match_mmio,
 857	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
 858	TP_ARGS(gva, gpa, write, gpa_match),
 859
 860	TP_STRUCT__entry(
 861		__field(gva_t, gva)
 862		__field(gpa_t, gpa)
 863		__field(bool, write)
 864		__field(bool, gpa_match)
 865		),
 866
 867	TP_fast_assign(
 868		__entry->gva = gva;
 869		__entry->gpa = gpa;
 870		__entry->write = write;
  871		__entry->gpa_match = gpa_match;
 872		),
 873
 874	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
 875		  __entry->write ? "Write" : "Read",
 876		  __entry->gpa_match ? "GPA" : "GVA")
 877);
 878
 879TRACE_EVENT(kvm_write_tsc_offset,
 880	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
 881		 __u64 next_tsc_offset),
 882	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
 883
 884	TP_STRUCT__entry(
 885		__field( unsigned int,	vcpu_id				)
 886		__field(	__u64,	previous_tsc_offset		)
 887		__field(	__u64,	next_tsc_offset			)
 888	),
 889
 890	TP_fast_assign(
 891		__entry->vcpu_id		= vcpu_id;
 892		__entry->previous_tsc_offset	= previous_tsc_offset;
 893		__entry->next_tsc_offset	= next_tsc_offset;
 894	),
 895
 896	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
 897		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
 898);
 899
 900#ifdef CONFIG_X86_64
 901
 902#define host_clocks					\
 903	{VDSO_CLOCKMODE_NONE, "none"},			\
 904	{VDSO_CLOCKMODE_TSC,  "tsc"}			\
 905
 906TRACE_EVENT(kvm_update_master_clock,
 907	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
 908	TP_ARGS(use_master_clock, host_clock, offset_matched),
 909
 910	TP_STRUCT__entry(
 911		__field(		bool,	use_master_clock	)
 912		__field(	unsigned int,	host_clock		)
 913		__field(		bool,	offset_matched		)
 914	),
 915
 916	TP_fast_assign(
 917		__entry->use_master_clock	= use_master_clock;
 918		__entry->host_clock		= host_clock;
 919		__entry->offset_matched		= offset_matched;
 920	),
 921
 922	TP_printk("masterclock %d hostclock %s offsetmatched %u",
 923		  __entry->use_master_clock,
 924		  __print_symbolic(__entry->host_clock, host_clocks),
 925		  __entry->offset_matched)
 926);
 927
 928TRACE_EVENT(kvm_track_tsc,
 929	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
 930		 unsigned int online_vcpus, bool use_master_clock,
 931		 unsigned int host_clock),
 932	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
 933		host_clock),
 934
 935	TP_STRUCT__entry(
 936		__field(	unsigned int,	vcpu_id			)
 937		__field(	unsigned int,	nr_vcpus_matched_tsc	)
 938		__field(	unsigned int,	online_vcpus		)
 939		__field(	bool,		use_master_clock	)
 940		__field(	unsigned int,	host_clock		)
 941	),
 942
 943	TP_fast_assign(
 944		__entry->vcpu_id		= vcpu_id;
 945		__entry->nr_vcpus_matched_tsc	= nr_matched;
 946		__entry->online_vcpus		= online_vcpus;
 947		__entry->use_master_clock	= use_master_clock;
 948		__entry->host_clock		= host_clock;
 949	),
 950
 951	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
 952		  " hostclock %s",
 953		  __entry->vcpu_id, __entry->use_master_clock,
 954		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
 955		  __print_symbolic(__entry->host_clock, host_clocks))
 956);
 957
 958#endif /* CONFIG_X86_64 */
 959
 960/*
 961 * Tracepoint for PML full VMEXIT.
 962 */
 963TRACE_EVENT(kvm_pml_full,
 964	TP_PROTO(unsigned int vcpu_id),
 965	TP_ARGS(vcpu_id),
 966
 967	TP_STRUCT__entry(
 968		__field(	unsigned int,	vcpu_id			)
 969	),
 970
 971	TP_fast_assign(
 972		__entry->vcpu_id		= vcpu_id;
 973	),
 974
 975	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
 976);
 977
 978TRACE_EVENT(kvm_ple_window_update,
 979	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
 980	TP_ARGS(vcpu_id, new, old),
 981
 982	TP_STRUCT__entry(
 983		__field(        unsigned int,   vcpu_id         )
 984		__field(        unsigned int,       new         )
 985		__field(        unsigned int,       old         )
 986	),
 987
 988	TP_fast_assign(
 989		__entry->vcpu_id        = vcpu_id;
 990		__entry->new            = new;
 991		__entry->old            = old;
 992	),
 993
 994	TP_printk("vcpu %u old %u new %u (%s)",
 995	          __entry->vcpu_id, __entry->old, __entry->new,
  996		  __entry->old < __entry->new ? "grew" : "shrank")
 997);
 998
 999TRACE_EVENT(kvm_pvclock_update,
1000	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
1001	TP_ARGS(vcpu_id, pvclock),
1002
1003	TP_STRUCT__entry(
1004		__field(	unsigned int,	vcpu_id			)
1005		__field(	__u32,		version			)
1006		__field(	__u64,		tsc_timestamp		)
1007		__field(	__u64,		system_time		)
1008		__field(	__u32,		tsc_to_system_mul	)
1009		__field(	__s8,		tsc_shift		)
1010		__field(	__u8,		flags			)
1011	),
1012
1013	TP_fast_assign(
1014		__entry->vcpu_id	   = vcpu_id;
1015		__entry->version	   = pvclock->version;
1016		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
1017		__entry->system_time	   = pvclock->system_time;
1018		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
1019		__entry->tsc_shift	   = pvclock->tsc_shift;
1020		__entry->flags		   = pvclock->flags;
1021	),
1022
1023	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
1024		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
1025		  "flags 0x%x }",
1026		  __entry->vcpu_id,
1027		  __entry->version,
1028		  __entry->tsc_timestamp,
1029		  __entry->system_time,
1030		  __entry->tsc_to_system_mul,
1031		  __entry->tsc_shift,
1032		  __entry->flags)
1033);
1034
1035TRACE_EVENT(kvm_wait_lapic_expire,
1036	TP_PROTO(unsigned int vcpu_id, s64 delta),
1037	TP_ARGS(vcpu_id, delta),
1038
1039	TP_STRUCT__entry(
1040		__field(	unsigned int,	vcpu_id		)
1041		__field(	s64,		delta		)
1042	),
1043
1044	TP_fast_assign(
1045		__entry->vcpu_id	   = vcpu_id;
1046		__entry->delta             = delta;
1047	),
1048
1049	TP_printk("vcpu %u: delta %lld (%s)",
1050		  __entry->vcpu_id,
1051		  __entry->delta,
1052		  __entry->delta < 0 ? "early" : "late")
1053);
1054
1055TRACE_EVENT(kvm_smm_transition,
1056	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
1057	TP_ARGS(vcpu_id, smbase, entering),
1058
1059	TP_STRUCT__entry(
1060		__field(	unsigned int,	vcpu_id		)
1061		__field(	u64,		smbase		)
1062		__field(	bool,		entering	)
1063	),
1064
1065	TP_fast_assign(
1066		__entry->vcpu_id	= vcpu_id;
1067		__entry->smbase		= smbase;
1068		__entry->entering	= entering;
1069	),
1070
1071	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
1072		  __entry->vcpu_id,
1073		  __entry->entering ? "entering" : "leaving",
1074		  __entry->smbase)
1075);
1076
1077/*
1078 * Tracepoint for VT-d posted-interrupts and AMD-Vi Guest Virtual APIC.
1079 */
1080TRACE_EVENT(kvm_pi_irte_update,
1081	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
1082		 unsigned int gsi, unsigned int gvec,
1083		 u64 pi_desc_addr, bool set),
1084	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
1085
1086	TP_STRUCT__entry(
1087		__field(	unsigned int,	host_irq	)
1088		__field(	unsigned int,	vcpu_id		)
1089		__field(	unsigned int,	gsi		)
1090		__field(	unsigned int,	gvec		)
1091		__field(	u64,		pi_desc_addr	)
1092		__field(	bool,		set		)
1093	),
1094
1095	TP_fast_assign(
1096		__entry->host_irq	= host_irq;
1097		__entry->vcpu_id	= vcpu_id;
1098		__entry->gsi		= gsi;
1099		__entry->gvec		= gvec;
1100		__entry->pi_desc_addr	= pi_desc_addr;
1101		__entry->set		= set;
1102	),
1103
1104	TP_printk("PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
1105		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
1106		  __entry->set ? "enabled and being updated" : "disabled",
1107		  __entry->host_irq,
1108		  __entry->vcpu_id,
1109		  __entry->gsi,
1110		  __entry->gvec,
1111		  __entry->pi_desc_addr)
1112);
1113
1114/*
1115 * Tracepoint for kvm_hv_notify_acked_sint.
1116 */
1117TRACE_EVENT(kvm_hv_notify_acked_sint,
1118	TP_PROTO(int vcpu_id, u32 sint),
1119	TP_ARGS(vcpu_id, sint),
1120
1121	TP_STRUCT__entry(
1122		__field(int, vcpu_id)
1123		__field(u32, sint)
1124	),
1125
1126	TP_fast_assign(
1127		__entry->vcpu_id = vcpu_id;
1128		__entry->sint = sint;
1129	),
1130
1131	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
1132);
1133
1134/*
1135 * Tracepoint for synic_set_irq.
1136 */
1137TRACE_EVENT(kvm_hv_synic_set_irq,
1138	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
1139	TP_ARGS(vcpu_id, sint, vector, ret),
1140
1141	TP_STRUCT__entry(
1142		__field(int, vcpu_id)
1143		__field(u32, sint)
1144		__field(int, vector)
1145		__field(int, ret)
1146	),
1147
1148	TP_fast_assign(
1149		__entry->vcpu_id = vcpu_id;
1150		__entry->sint = sint;
1151		__entry->vector = vector;
1152		__entry->ret = ret;
1153	),
1154
1155	TP_printk("vcpu_id %d sint %u vector %d ret %d",
1156		  __entry->vcpu_id, __entry->sint, __entry->vector,
1157		  __entry->ret)
1158);
1159
1160/*
1161 * Tracepoint for kvm_hv_synic_send_eoi.
1162 */
1163TRACE_EVENT(kvm_hv_synic_send_eoi,
1164	TP_PROTO(int vcpu_id, int vector),
1165	TP_ARGS(vcpu_id, vector),
1166
1167	TP_STRUCT__entry(
1168		__field(int, vcpu_id)
1169		__field(u32, sint)
1170		__field(int, vector)
1171		__field(int, ret)
1172	),
1173
1174	TP_fast_assign(
1175		__entry->vcpu_id = vcpu_id;
1176		__entry->vector	= vector;
1177	),
1178
1179	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
1180);
1181
1182/*
1183 * Tracepoint for synic_set_msr.
1184 */
1185TRACE_EVENT(kvm_hv_synic_set_msr,
1186	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
1187	TP_ARGS(vcpu_id, msr, data, host),
1188
1189	TP_STRUCT__entry(
1190		__field(int, vcpu_id)
1191		__field(u32, msr)
1192		__field(u64, data)
1193		__field(bool, host)
1194	),
1195
1196	TP_fast_assign(
1197		__entry->vcpu_id = vcpu_id;
1198		__entry->msr = msr;
1199		__entry->data = data;
 1200		__entry->host = host;
1201	),
1202
1203	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
1204		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
1205);
1206
1207/*
1208 * Tracepoint for stimer_set_config.
1209 */
1210TRACE_EVENT(kvm_hv_stimer_set_config,
1211	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
1212	TP_ARGS(vcpu_id, timer_index, config, host),
1213
1214	TP_STRUCT__entry(
1215		__field(int, vcpu_id)
1216		__field(int, timer_index)
1217		__field(u64, config)
1218		__field(bool, host)
1219	),
1220
1221	TP_fast_assign(
1222		__entry->vcpu_id = vcpu_id;
1223		__entry->timer_index = timer_index;
1224		__entry->config = config;
1225		__entry->host = host;
1226	),
1227
1228	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
1229		  __entry->vcpu_id, __entry->timer_index, __entry->config,
1230		  __entry->host)
1231);
1232
1233/*
1234 * Tracepoint for stimer_set_count.
1235 */
1236TRACE_EVENT(kvm_hv_stimer_set_count,
1237	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
1238	TP_ARGS(vcpu_id, timer_index, count, host),
1239
1240	TP_STRUCT__entry(
1241		__field(int, vcpu_id)
1242		__field(int, timer_index)
1243		__field(u64, count)
1244		__field(bool, host)
1245	),
1246
1247	TP_fast_assign(
1248		__entry->vcpu_id = vcpu_id;
1249		__entry->timer_index = timer_index;
1250		__entry->count = count;
1251		__entry->host = host;
1252	),
1253
1254	TP_printk("vcpu_id %d timer %d count %llu host %d",
1255		  __entry->vcpu_id, __entry->timer_index, __entry->count,
1256		  __entry->host)
1257);
1258
1259/*
1260 * Tracepoint for stimer_start(periodic timer case).
1261 */
1262TRACE_EVENT(kvm_hv_stimer_start_periodic,
1263	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
1264	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),
1265
1266	TP_STRUCT__entry(
1267		__field(int, vcpu_id)
1268		__field(int, timer_index)
1269		__field(u64, time_now)
1270		__field(u64, exp_time)
1271	),
1272
1273	TP_fast_assign(
1274		__entry->vcpu_id = vcpu_id;
1275		__entry->timer_index = timer_index;
1276		__entry->time_now = time_now;
1277		__entry->exp_time = exp_time;
1278	),
1279
1280	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
1281		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1282		  __entry->exp_time)
1283);
1284
1285/*
1286 * Tracepoint for stimer_start(one-shot timer case).
1287 */
1288TRACE_EVENT(kvm_hv_stimer_start_one_shot,
1289	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
1290	TP_ARGS(vcpu_id, timer_index, time_now, count),
1291
1292	TP_STRUCT__entry(
1293		__field(int, vcpu_id)
1294		__field(int, timer_index)
1295		__field(u64, time_now)
1296		__field(u64, count)
1297	),
1298
1299	TP_fast_assign(
1300		__entry->vcpu_id = vcpu_id;
1301		__entry->timer_index = timer_index;
1302		__entry->time_now = time_now;
1303		__entry->count = count;
1304	),
1305
1306	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
1307		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1308		  __entry->count)
1309);
1310
1311/*
1312 * Tracepoint for stimer_timer_callback.
1313 */
1314TRACE_EVENT(kvm_hv_stimer_callback,
1315	TP_PROTO(int vcpu_id, int timer_index),
1316	TP_ARGS(vcpu_id, timer_index),
1317
1318	TP_STRUCT__entry(
1319		__field(int, vcpu_id)
1320		__field(int, timer_index)
1321	),
1322
1323	TP_fast_assign(
1324		__entry->vcpu_id = vcpu_id;
1325		__entry->timer_index = timer_index;
1326	),
1327
1328	TP_printk("vcpu_id %d timer %d",
1329		  __entry->vcpu_id, __entry->timer_index)
1330);
1331
1332/*
1333 * Tracepoint for stimer_expiration.
1334 */
1335TRACE_EVENT(kvm_hv_stimer_expiration,
1336	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
1337	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
1338
1339	TP_STRUCT__entry(
1340		__field(int, vcpu_id)
1341		__field(int, timer_index)
1342		__field(int, direct)
1343		__field(int, msg_send_result)
1344	),
1345
1346	TP_fast_assign(
1347		__entry->vcpu_id = vcpu_id;
1348		__entry->timer_index = timer_index;
1349		__entry->direct = direct;
1350		__entry->msg_send_result = msg_send_result;
1351	),
1352
1353	TP_printk("vcpu_id %d timer %d direct %d send result %d",
1354		  __entry->vcpu_id, __entry->timer_index,
1355		  __entry->direct, __entry->msg_send_result)
1356);
1357
1358/*
1359 * Tracepoint for stimer_cleanup.
1360 */
1361TRACE_EVENT(kvm_hv_stimer_cleanup,
1362	TP_PROTO(int vcpu_id, int timer_index),
1363	TP_ARGS(vcpu_id, timer_index),
1364
1365	TP_STRUCT__entry(
1366		__field(int, vcpu_id)
1367		__field(int, timer_index)
1368	),
1369
1370	TP_fast_assign(
1371		__entry->vcpu_id = vcpu_id;
1372		__entry->timer_index = timer_index;
1373	),
1374
1375	TP_printk("vcpu_id %d timer %d",
1376		  __entry->vcpu_id, __entry->timer_index)
1377);
1378
1379#define kvm_print_apicv_inhibit_reasons(inhibits)	\
1380	(inhibits), (inhibits) ? " " : "",		\
1381	(inhibits) ? __print_flags(inhibits, "|", APICV_INHIBIT_REASONS) : ""
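/*
 * Note: like kvm_print_exit_reason(), this helper expands to three
 * arguments (raw mask, optional separator, decoded flags) and therefore
 * pairs with the "0x%lx%s%s" conversions in the TP_printk() below:
 *
 *	TP_printk("inhibits=0x%lx%s%s",
 *		  kvm_print_apicv_inhibit_reasons(__entry->inhibits))
 */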
1382
1383TRACE_EVENT(kvm_apicv_inhibit_changed,
1384	    TP_PROTO(int reason, bool set, unsigned long inhibits),
1385	    TP_ARGS(reason, set, inhibits),
1386
1387	TP_STRUCT__entry(
1388		__field(int, reason)
1389		__field(bool, set)
1390		__field(unsigned long, inhibits)
1391	),
1392
1393	TP_fast_assign(
1394		__entry->reason = reason;
1395		__entry->set = set;
1396		__entry->inhibits = inhibits;
1397	),
1398
1399	TP_printk("%s reason=%u, inhibits=0x%lx%s%s",
1400		  __entry->set ? "set" : "cleared",
1401		  __entry->reason,
1402		  kvm_print_apicv_inhibit_reasons(__entry->inhibits))
1403);
1404
1405TRACE_EVENT(kvm_apicv_accept_irq,
1406	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
1407	    TP_ARGS(apicid, dm, tm, vec),
1408
1409	TP_STRUCT__entry(
1410		__field(	__u32,		apicid		)
1411		__field(	__u16,		dm		)
1412		__field(	__u16,		tm		)
1413		__field(	__u8,		vec		)
1414	),
1415
1416	TP_fast_assign(
1417		__entry->apicid		= apicid;
1418		__entry->dm		= dm;
1419		__entry->tm		= tm;
1420		__entry->vec		= vec;
1421	),
1422
1423	TP_printk("apicid %x vec %u (%s|%s)",
1424		  __entry->apicid, __entry->vec,
1425		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
1426		  __entry->tm ? "level" : "edge")
1427);
1428
1429/*
1430 * Tracepoint for AMD AVIC
1431 */
1432TRACE_EVENT(kvm_avic_incomplete_ipi,
1433	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
1434	    TP_ARGS(vcpu, icrh, icrl, id, index),
1435
1436	TP_STRUCT__entry(
1437		__field(u32, vcpu)
1438		__field(u32, icrh)
1439		__field(u32, icrl)
1440		__field(u32, id)
1441		__field(u32, index)
1442	),
1443
1444	TP_fast_assign(
1445		__entry->vcpu = vcpu;
1446		__entry->icrh = icrh;
1447		__entry->icrl = icrl;
1448		__entry->id = id;
1449		__entry->index = index;
1450	),
1451
1452	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
1453		  __entry->vcpu, __entry->icrh, __entry->icrl,
1454		  __entry->id, __entry->index)
1455);
1456
1457TRACE_EVENT(kvm_avic_unaccelerated_access,
1458	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
1459	    TP_ARGS(vcpu, offset, ft, rw, vec),
1460
1461	TP_STRUCT__entry(
1462		__field(u32, vcpu)
1463		__field(u32, offset)
1464		__field(bool, ft)
1465		__field(bool, rw)
1466		__field(u32, vec)
1467	),
1468
1469	TP_fast_assign(
1470		__entry->vcpu = vcpu;
1471		__entry->offset = offset;
1472		__entry->ft = ft;
1473		__entry->rw = rw;
1474		__entry->vec = vec;
1475	),
1476
1477	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
1478		  __entry->vcpu,
1479		  __entry->offset,
1480		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
1481		  __entry->ft ? "trap" : "fault",
1482		  __entry->rw ? "write" : "read",
1483		  __entry->vec)
1484);
1485
1486TRACE_EVENT(kvm_avic_ga_log,
1487	    TP_PROTO(u32 vmid, u32 vcpuid),
1488	    TP_ARGS(vmid, vcpuid),
1489
1490	TP_STRUCT__entry(
1491		__field(u32, vmid)
1492		__field(u32, vcpuid)
1493	),
1494
1495	TP_fast_assign(
1496		__entry->vmid = vmid;
1497		__entry->vcpuid = vcpuid;
1498	),
1499
1500	TP_printk("vmid=%u, vcpuid=%u",
1501		  __entry->vmid, __entry->vcpuid)
1502);
1503
1504TRACE_EVENT(kvm_avic_kick_vcpu_slowpath,
1505	    TP_PROTO(u32 icrh, u32 icrl, u32 index),
1506	    TP_ARGS(icrh, icrl, index),
1507
1508	TP_STRUCT__entry(
1509		__field(u32, icrh)
1510		__field(u32, icrl)
1511		__field(u32, index)
1512	),
1513
1514	TP_fast_assign(
1515		__entry->icrh = icrh;
1516		__entry->icrl = icrl;
1517		__entry->index = index;
1518	),
1519
1520	TP_printk("icrh:icrl=%#08x:%08x, index=%u",
1521		  __entry->icrh, __entry->icrl, __entry->index)
1522);
1523
1524TRACE_EVENT(kvm_avic_doorbell,
1525	    TP_PROTO(u32 vcpuid, u32 apicid),
1526	    TP_ARGS(vcpuid, apicid),
1527
1528	TP_STRUCT__entry(
1529		__field(u32, vcpuid)
1530		__field(u32, apicid)
1531	),
1532
1533	TP_fast_assign(
1534		__entry->vcpuid = vcpuid;
1535		__entry->apicid = apicid;
1536	),
1537
1538	TP_printk("vcpuid=%u, apicid=%u",
1539		  __entry->vcpuid, __entry->apicid)
1540);
1541
1542TRACE_EVENT(kvm_hv_timer_state,
1543		TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
1544		TP_ARGS(vcpu_id, hv_timer_in_use),
1545		TP_STRUCT__entry(
1546			__field(unsigned int, vcpu_id)
1547			__field(unsigned int, hv_timer_in_use)
1548			),
1549		TP_fast_assign(
1550			__entry->vcpu_id = vcpu_id;
1551			__entry->hv_timer_in_use = hv_timer_in_use;
1552			),
1553		TP_printk("vcpu_id %x hv_timer %x",
1554			__entry->vcpu_id,
1555			__entry->hv_timer_in_use)
1556);
1557
1558/*
1559 * Tracepoint for kvm_hv_flush_tlb.
1560 */
1561TRACE_EVENT(kvm_hv_flush_tlb,
1562	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode),
1563	TP_ARGS(processor_mask, address_space, flags, guest_mode),
1564
1565	TP_STRUCT__entry(
1566		__field(u64, processor_mask)
1567		__field(u64, address_space)
1568		__field(u64, flags)
1569		__field(bool, guest_mode)
1570	),
1571
1572	TP_fast_assign(
1573		__entry->processor_mask = processor_mask;
1574		__entry->address_space = address_space;
1575		__entry->flags = flags;
1576		__entry->guest_mode = guest_mode;
1577	),
1578
1579	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s",
1580		  __entry->processor_mask, __entry->address_space,
1581		  __entry->flags, __entry->guest_mode ? "(L2)" : "")
1582);
1583
1584/*
1585 * Tracepoint for kvm_hv_flush_tlb_ex.
1586 */
1587TRACE_EVENT(kvm_hv_flush_tlb_ex,
1588	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode),
1589	TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode),
1590
1591	TP_STRUCT__entry(
1592		__field(u64, valid_bank_mask)
1593		__field(u64, format)
1594		__field(u64, address_space)
1595		__field(u64, flags)
1596		__field(bool, guest_mode)
1597	),
1598
1599	TP_fast_assign(
1600		__entry->valid_bank_mask = valid_bank_mask;
1601		__entry->format = format;
1602		__entry->address_space = address_space;
1603		__entry->flags = flags;
1604		__entry->guest_mode = guest_mode;
1605	),
1606
1607	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
1608		  "address_space 0x%llx flags 0x%llx %s",
1609		  __entry->valid_bank_mask, __entry->format,
1610		  __entry->address_space, __entry->flags,
1611		  __entry->guest_mode ? "(L2)" : "")
1612);
1613
1614/*
1615 * Tracepoints for kvm_hv_send_ipi.
1616 */
1617TRACE_EVENT(kvm_hv_send_ipi,
1618	TP_PROTO(u32 vector, u64 processor_mask),
1619	TP_ARGS(vector, processor_mask),
1620
1621	TP_STRUCT__entry(
1622		__field(u32, vector)
1623		__field(u64, processor_mask)
1624	),
1625
1626	TP_fast_assign(
1627		__entry->vector = vector;
1628		__entry->processor_mask = processor_mask;
1629	),
1630
1631	TP_printk("vector %x processor_mask 0x%llx",
1632		  __entry->vector, __entry->processor_mask)
1633);
1634
1635TRACE_EVENT(kvm_hv_send_ipi_ex,
1636	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
1637	TP_ARGS(vector, format, valid_bank_mask),
1638
1639	TP_STRUCT__entry(
1640		__field(u32, vector)
1641		__field(u64, format)
1642		__field(u64, valid_bank_mask)
1643	),
1644
1645	TP_fast_assign(
1646		__entry->vector = vector;
1647		__entry->format = format;
1648		__entry->valid_bank_mask = valid_bank_mask;
1649	),
1650
1651	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
1652		  __entry->vector, __entry->format,
1653		  __entry->valid_bank_mask)
1654);
1655
1656TRACE_EVENT(kvm_pv_tlb_flush,
1657	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
1658	TP_ARGS(vcpu_id, need_flush_tlb),
1659
1660	TP_STRUCT__entry(
1661		__field(	unsigned int,	vcpu_id		)
1662		__field(	bool,	need_flush_tlb		)
1663	),
1664
1665	TP_fast_assign(
1666		__entry->vcpu_id	= vcpu_id;
1667		__entry->need_flush_tlb = need_flush_tlb;
1668	),
1669
1670	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
1671		__entry->need_flush_tlb ? "true" : "false")
1672);
1673
1674/*
1675 * Tracepoint for failed nested VMX VM-Enter.
1676 */
1677TRACE_EVENT(kvm_nested_vmenter_failed,
1678	TP_PROTO(const char *msg, u32 err),
1679	TP_ARGS(msg, err),
1680
1681	TP_STRUCT__entry(
1682		__string(msg, msg)
1683		__field(u32, err)
1684	),
1685
1686	TP_fast_assign(
1687		__assign_str(msg);
1688		__entry->err = err;
1689	),
1690
1691	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
1692		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
1693);
1694
1695/*
1696 * Tracepoint for syndbg_set_msr.
1697 */
1698TRACE_EVENT(kvm_hv_syndbg_set_msr,
1699	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1700	TP_ARGS(vcpu_id, vp_index, msr, data),
1701
1702	TP_STRUCT__entry(
1703		__field(int, vcpu_id)
1704		__field(u32, vp_index)
1705		__field(u32, msr)
1706		__field(u64, data)
1707	),
1708
1709	TP_fast_assign(
1710		__entry->vcpu_id = vcpu_id;
1711		__entry->vp_index = vp_index;
1712		__entry->msr = msr;
1713		__entry->data = data;
1714	),
1715
1716	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1717		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1718		  __entry->data)
1719);
1720
1721/*
1722 * Tracepoint for syndbg_get_msr.
1723 */
1724TRACE_EVENT(kvm_hv_syndbg_get_msr,
1725	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1726	TP_ARGS(vcpu_id, vp_index, msr, data),
1727
1728	TP_STRUCT__entry(
1729		__field(int, vcpu_id)
1730		__field(u32, vp_index)
1731		__field(u32, msr)
1732		__field(u64, data)
1733	),
1734
1735	TP_fast_assign(
1736		__entry->vcpu_id = vcpu_id;
1737		__entry->vp_index = vp_index;
1738		__entry->msr = msr;
1739		__entry->data = data;
1740	),
1741
1742	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1743		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1744		  __entry->data)
1745);
1746
1747/*
1748 * Tracepoint for the start of VMGEXIT processing
1749 */
1750TRACE_EVENT(kvm_vmgexit_enter,
1751	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1752	TP_ARGS(vcpu_id, ghcb),
1753
1754	TP_STRUCT__entry(
1755		__field(unsigned int, vcpu_id)
1756		__field(u64, exit_reason)
1757		__field(u64, info1)
1758		__field(u64, info2)
1759	),
1760
1761	TP_fast_assign(
1762		__entry->vcpu_id     = vcpu_id;
1763		__entry->exit_reason = ghcb->save.sw_exit_code;
1764		__entry->info1       = ghcb->save.sw_exit_info_1;
1765		__entry->info2       = ghcb->save.sw_exit_info_2;
1766	),
1767
1768	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1769		  __entry->vcpu_id, __entry->exit_reason,
1770		  __entry->info1, __entry->info2)
1771);
1772
1773/*
1774 * Tracepoint for the end of VMGEXIT processing
1775 */
1776TRACE_EVENT(kvm_vmgexit_exit,
1777	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1778	TP_ARGS(vcpu_id, ghcb),
1779
1780	TP_STRUCT__entry(
1781		__field(unsigned int, vcpu_id)
1782		__field(u64, exit_reason)
1783		__field(u64, info1)
1784		__field(u64, info2)
1785	),
1786
1787	TP_fast_assign(
1788		__entry->vcpu_id     = vcpu_id;
1789		__entry->exit_reason = ghcb->save.sw_exit_code;
1790		__entry->info1       = ghcb->save.sw_exit_info_1;
1791		__entry->info2       = ghcb->save.sw_exit_info_2;
1792	),
1793
1794	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1795		  __entry->vcpu_id, __entry->exit_reason,
1796		  __entry->info1, __entry->info2)
1797);
1798
1799/*
 1800 * Tracepoint for the start of VMGEXIT MSR protocol processing
1801 */
1802TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
1803	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
1804	TP_ARGS(vcpu_id, ghcb_gpa),
1805
1806	TP_STRUCT__entry(
1807		__field(unsigned int, vcpu_id)
1808		__field(u64, ghcb_gpa)
1809	),
1810
1811	TP_fast_assign(
1812		__entry->vcpu_id  = vcpu_id;
1813		__entry->ghcb_gpa = ghcb_gpa;
1814	),
1815
1816	TP_printk("vcpu %u, ghcb_gpa %016llx",
1817		  __entry->vcpu_id, __entry->ghcb_gpa)
1818);
1819
1820/*
 1821 * Tracepoint for the end of VMGEXIT MSR protocol processing
1822 */
1823TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
1824	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
1825	TP_ARGS(vcpu_id, ghcb_gpa, result),
1826
1827	TP_STRUCT__entry(
1828		__field(unsigned int, vcpu_id)
1829		__field(u64, ghcb_gpa)
1830		__field(int, result)
1831	),
1832
1833	TP_fast_assign(
1834		__entry->vcpu_id  = vcpu_id;
1835		__entry->ghcb_gpa = ghcb_gpa;
1836		__entry->result   = result;
1837	),
1838
1839	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
1840		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
1841);
1842
1843/*
1844 * Tracepoint for #NPFs due to RMP faults.
1845 */
1846TRACE_EVENT(kvm_rmp_fault,
1847	TP_PROTO(struct kvm_vcpu *vcpu, u64 gpa, u64 pfn, u64 error_code,
1848		 int rmp_level, int psmash_ret),
1849	TP_ARGS(vcpu, gpa, pfn, error_code, rmp_level, psmash_ret),
1850
1851	TP_STRUCT__entry(
1852		__field(unsigned int, vcpu_id)
1853		__field(u64, gpa)
1854		__field(u64, pfn)
1855		__field(u64, error_code)
1856		__field(int, rmp_level)
1857		__field(int, psmash_ret)
1858	),
1859
1860	TP_fast_assign(
1861		__entry->vcpu_id	= vcpu->vcpu_id;
1862		__entry->gpa		= gpa;
1863		__entry->pfn		= pfn;
1864		__entry->error_code	= error_code;
1865		__entry->rmp_level	= rmp_level;
1866		__entry->psmash_ret	= psmash_ret;
1867	),
1868
1869	TP_printk("vcpu %u gpa %016llx pfn 0x%llx error_code 0x%llx rmp_level %d psmash_ret %d",
1870		  __entry->vcpu_id, __entry->gpa, __entry->pfn,
1871		  __entry->error_code, __entry->rmp_level, __entry->psmash_ret)
1872);
1873
1874#endif /* _TRACE_KVM_H */
1875
1876#undef TRACE_INCLUDE_PATH
1877#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1878#undef TRACE_INCLUDE_FILE
1879#define TRACE_INCLUDE_FILE trace
1880
1881/* This part must be outside protection */
1882#include <trace/define_trace.h>
v6.8
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
   3#define _TRACE_KVM_H
   4
   5#include <linux/tracepoint.h>
   6#include <asm/vmx.h>
   7#include <asm/svm.h>
   8#include <asm/clocksource.h>
   9#include <asm/pvclock-abi.h>
  10
  11#undef TRACE_SYSTEM
  12#define TRACE_SYSTEM kvm
  13
  14/*
  15 * Tracepoint for guest mode entry.
  16 */
  17TRACE_EVENT(kvm_entry,
  18	TP_PROTO(struct kvm_vcpu *vcpu),
  19	TP_ARGS(vcpu),
  20
  21	TP_STRUCT__entry(
  22		__field(	unsigned int,	vcpu_id		)
  23		__field(	unsigned long,	rip		)
 
  24	),
  25
  26	TP_fast_assign(
  27		__entry->vcpu_id        = vcpu->vcpu_id;
  28		__entry->rip		= kvm_rip_read(vcpu);
 
  29	),
  30
  31	TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
 
  32);
  33
  34/*
  35 * Tracepoint for hypercall.
  36 */
  37TRACE_EVENT(kvm_hypercall,
  38	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
  39		 unsigned long a2, unsigned long a3),
  40	TP_ARGS(nr, a0, a1, a2, a3),
  41
  42	TP_STRUCT__entry(
  43		__field(	unsigned long, 	nr		)
  44		__field(	unsigned long,	a0		)
  45		__field(	unsigned long,	a1		)
  46		__field(	unsigned long,	a2		)
  47		__field(	unsigned long,	a3		)
  48	),
  49
  50	TP_fast_assign(
  51		__entry->nr		= nr;
  52		__entry->a0		= a0;
  53		__entry->a1		= a1;
  54		__entry->a2		= a2;
  55		__entry->a3		= a3;
  56	),
  57
  58	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
  59		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
  60		 __entry->a3)
  61);
  62
  63/*
  64 * Tracepoint for hypercall.
  65 */
  66TRACE_EVENT(kvm_hv_hypercall,
  67	TP_PROTO(__u16 code, bool fast,  __u16 var_cnt, __u16 rep_cnt,
  68		 __u16 rep_idx, __u64 ingpa, __u64 outgpa),
  69	TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa),
  70
  71	TP_STRUCT__entry(
  72		__field(	__u16,		rep_cnt		)
  73		__field(	__u16,		rep_idx		)
  74		__field(	__u64,		ingpa		)
  75		__field(	__u64,		outgpa		)
  76		__field(	__u16, 		code		)
  77		__field(	__u16,		var_cnt		)
  78		__field(	bool,		fast		)
  79	),
  80
  81	TP_fast_assign(
  82		__entry->rep_cnt	= rep_cnt;
  83		__entry->rep_idx	= rep_idx;
  84		__entry->ingpa		= ingpa;
  85		__entry->outgpa		= outgpa;
  86		__entry->code		= code;
  87		__entry->var_cnt	= var_cnt;
  88		__entry->fast		= fast;
  89	),
  90
  91	TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
  92		  __entry->code, __entry->fast ? "fast" : "slow",
  93		  __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx,
  94		  __entry->ingpa, __entry->outgpa)
  95);
  96
  97TRACE_EVENT(kvm_hv_hypercall_done,
  98	TP_PROTO(u64 result),
  99	TP_ARGS(result),
 100
 101	TP_STRUCT__entry(
 102		__field(__u64, result)
 103	),
 104
 105	TP_fast_assign(
 106		__entry->result	= result;
 107	),
 108
 109	TP_printk("result 0x%llx", __entry->result)
 110);
 111
 112/*
 113 * Tracepoint for Xen hypercall.
 114 */
 115TRACE_EVENT(kvm_xen_hypercall,
 116	    TP_PROTO(u8 cpl, unsigned long nr,
 117		     unsigned long a0, unsigned long a1, unsigned long a2,
 118		     unsigned long a3, unsigned long a4, unsigned long a5),
 119	    TP_ARGS(cpl, nr, a0, a1, a2, a3, a4, a5),
 120
 121	TP_STRUCT__entry(
 122		__field(u8, cpl)
 123		__field(unsigned long, nr)
 124		__field(unsigned long, a0)
 125		__field(unsigned long, a1)
 126		__field(unsigned long, a2)
 127		__field(unsigned long, a3)
 128		__field(unsigned long, a4)
 129		__field(unsigned long, a5)
 130	),
 131
 132	TP_fast_assign(
 133		__entry->cpl = cpl;
 134		__entry->nr = nr;
 135		__entry->a0 = a0;
 136		__entry->a1 = a1;
 137		__entry->a2 = a2;
 138		__entry->a3 = a3;
 139		__entry->a4 = a4;
 140		__entry->a5 = a5;
 141	),
 142
 143	TP_printk("cpl %d nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx",
 144		  __entry->cpl, __entry->nr,
 145		  __entry->a0, __entry->a1, __entry->a2,
 146		  __entry->a3, __entry->a4, __entry->a5)
 147);
 148
 149
 150
 151/*
 152 * Tracepoint for PIO.
 153 */
 154
 155#define KVM_PIO_IN   0
 156#define KVM_PIO_OUT  1
 157
 158TRACE_EVENT(kvm_pio,
 159	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
 160		 unsigned int count, const void *data),
 161	TP_ARGS(rw, port, size, count, data),
 162
 163	TP_STRUCT__entry(
 164		__field(	unsigned int, 	rw		)
 165		__field(	unsigned int, 	port		)
 166		__field(	unsigned int, 	size		)
 167		__field(	unsigned int,	count		)
 168		__field(	unsigned int,	val		)
 169	),
 170
 171	TP_fast_assign(
 172		__entry->rw		= rw;
 173		__entry->port		= port;
 174		__entry->size		= size;
 175		__entry->count		= count;
 176		if (size == 1)
 177			__entry->val	= *(unsigned char *)data;
 178		else if (size == 2)
 179			__entry->val	= *(unsigned short *)data;
 180		else
 181			__entry->val	= *(unsigned int *)data;
 182	),
 183
 184	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
 185		  __entry->rw ? "write" : "read",
 186		  __entry->port, __entry->size, __entry->count, __entry->val,
 187		  __entry->count > 1 ? "(...)" : "")
 188);
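/*
 * Editor's note (illustrative): for string I/O (count > 1) only the first
 * element pointed to by @data is captured in 'val', and TP_printk()
 * appends "(...)" to flag that the remaining elements were not recorded.
 * A one-byte write to port 0x3f8 would be rendered roughly as
 * "pio_write at 0x3f8 size 1 count 1 val 0x41".
 */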
 189
 190/*
 191 * Tracepoint for fast mmio.
 192 */
 193TRACE_EVENT(kvm_fast_mmio,
 194	TP_PROTO(u64 gpa),
 195	TP_ARGS(gpa),
 196
 197	TP_STRUCT__entry(
 198		__field(u64,	gpa)
 199	),
 200
 201	TP_fast_assign(
 202		__entry->gpa		= gpa;
 203	),
 204
 205	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
 206);
 207
 208/*
 209 * Tracepoint for cpuid.
 210 */
 211TRACE_EVENT(kvm_cpuid,
 212	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
 213		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
 214		 bool found, bool used_max_basic),
 215	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),
 216
 217	TP_STRUCT__entry(
 218		__field(	unsigned int,	function	)
 219		__field(	unsigned int,	index		)
 220		__field(	unsigned long,	rax		)
 221		__field(	unsigned long,	rbx		)
 222		__field(	unsigned long,	rcx		)
 223		__field(	unsigned long,	rdx		)
 224		__field(	bool,		found		)
 225		__field(	bool,		used_max_basic	)
 226	),
 227
 228	TP_fast_assign(
 229		__entry->function	= function;
 230		__entry->index		= index;
 231		__entry->rax		= rax;
 232		__entry->rbx		= rbx;
 233		__entry->rcx		= rcx;
 234		__entry->rdx		= rdx;
 235		__entry->found		= found;
 236		__entry->used_max_basic	= used_max_basic;
 237	),
 238
 239	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
 240		  __entry->function, __entry->index, __entry->rax,
 241		  __entry->rbx, __entry->rcx, __entry->rdx,
 242		  __entry->found ? "found" : "not found",
 243		  __entry->used_max_basic ? ", used max basic" : "")
 244);
 245
 246#define AREG(x) { APIC_##x, "APIC_" #x }
 247
 248#define kvm_trace_symbol_apic						    \
 249	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
 250	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
 251	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
 252	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
 253	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
 254	AREG(ECTRL)
 255/*
 256 * Tracepoint for apic access.
 257 */
 258TRACE_EVENT(kvm_apic,
 259	TP_PROTO(unsigned int rw, unsigned int reg, u64 val),
 260	TP_ARGS(rw, reg, val),
 261
 262	TP_STRUCT__entry(
 263		__field(	unsigned int,	rw		)
 264		__field(	unsigned int,	reg		)
 265		__field(	u64,		val		)
 266	),
 267
 268	TP_fast_assign(
 269		__entry->rw		= rw;
 270		__entry->reg		= reg;
 271		__entry->val		= val;
 272	),
 273
 274	TP_printk("apic_%s %s = 0x%llx",
 275		  __entry->rw ? "write" : "read",
 276		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
 277		  __entry->val)
 278);
 279
 280#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 281#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 282
 283#define KVM_ISA_VMX   1
 284#define KVM_ISA_SVM   2
 285
 286#define kvm_print_exit_reason(exit_reason, isa)				\
 287	(isa == KVM_ISA_VMX) ?						\
 288	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
 289	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
 290	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
 291	(isa == KVM_ISA_VMX) ?						\
 292	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""
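/*
 * Editor's note (illustrative): for KVM_ISA_VMX the basic exit reason
 * lives in the low 16 bits and any modifier bits in the upper half are
 * rendered separately via VMX_EXIT_REASON_FLAGS, whereas SVM exit codes
 * are looked up whole in SVM_EXIT_REASONS. The macro expands to three
 * printf arguments, which is why its callers pair it with a "%s%s%s"
 * format.
 */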
 293
 294#define TRACE_EVENT_KVM_EXIT(name)					     \
 295TRACE_EVENT(name,							     \
 296	TP_PROTO(struct kvm_vcpu *vcpu, u32 isa),			     \
 297	TP_ARGS(vcpu, isa),						     \
 298									     \
 299	TP_STRUCT__entry(						     \
 300		__field(	unsigned int,	exit_reason	)	     \
 301		__field(	unsigned long,	guest_rip	)	     \
 302		__field(	u32,	        isa             )	     \
 303		__field(	u64,	        info1           )	     \
 304		__field(	u64,	        info2           )	     \
 305		__field(	u32,	        intr_info	)	     \
 306		__field(	u32,	        error_code	)	     \
 307		__field(	unsigned int,	vcpu_id         )	     \
 308	),								     \
 309									     \
 310	TP_fast_assign(							     \
 311		__entry->guest_rip	= kvm_rip_read(vcpu);		     \
 312		__entry->isa            = isa;				     \
 313		__entry->vcpu_id        = vcpu->vcpu_id;		     \
 314		static_call(kvm_x86_get_exit_info)(vcpu,		     \
 315					  &__entry->exit_reason,	     \
 316					  &__entry->info1,		     \
 317					  &__entry->info2,		     \
 318					  &__entry->intr_info,		     \
 319					  &__entry->error_code);	     \
 320	),								     \
 321									     \
 322	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
 323		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x",	     \
 324		  __entry->vcpu_id,					     \
 325		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
 326		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
 327		  __entry->intr_info, __entry->error_code)		     \
 328)
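/*
 * Editor's note: TRACE_EVENT_KVM_EXIT() is instantiated below for both
 * kvm_exit and kvm_nested_vmexit, so ordinary and nested #VMEXITs share
 * one record layout; the vendor-specific details are filled in at trace
 * time through the kvm_x86_get_exit_info static call.
 */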
 329
 330/*
 331 * Tracepoint for kvm guest exit:
 332 */
 333TRACE_EVENT_KVM_EXIT(kvm_exit);
 334
 335/*
 336 * Tracepoint for kvm interrupt injection:
 337 */
 338TRACE_EVENT(kvm_inj_virq,
 339	TP_PROTO(unsigned int vector, bool soft, bool reinjected),
 340	TP_ARGS(vector, soft, reinjected),
 341
 342	TP_STRUCT__entry(
 343		__field(	unsigned int,	vector		)
 344		__field(	bool,		soft		)
 345		__field(	bool,		reinjected	)
 346	),
 347
 348	TP_fast_assign(
 349		__entry->vector		= vector;
 350		__entry->soft		= soft;
 351		__entry->reinjected	= reinjected;
 352	),
 353
 354	TP_printk("%s 0x%x%s",
 355		  __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector,
 356		  __entry->reinjected ? " [reinjected]" : "")
 357);
 358
 359#define EXS(x) { x##_VECTOR, "#" #x }
 360
 361#define kvm_trace_sym_exc						\
 362	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
 363	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
 364	EXS(MF), EXS(AC), EXS(MC)
 365
 366/*
 367 * Tracepoint for kvm exception injection:
 368 */
 369TRACE_EVENT(kvm_inj_exception,
 370	TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
 371		 bool reinjected),
 372	TP_ARGS(exception, has_error, error_code, reinjected),
 373
 374	TP_STRUCT__entry(
 375		__field(	u8,	exception	)
 376		__field(	u8,	has_error	)
 377		__field(	u32,	error_code	)
 378		__field(	bool,	reinjected	)
 379	),
 380
 381	TP_fast_assign(
 382		__entry->exception	= exception;
 383		__entry->has_error	= has_error;
 384		__entry->error_code	= error_code;
 385		__entry->reinjected	= reinjected;
 386	),
 387
 388	TP_printk("%s%s%s%s%s",
 389		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
 390		  !__entry->has_error ? "" : " (",
 391		  !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }),
 392		  !__entry->has_error ? "" : ")",
 393		  __entry->reinjected ? " [reinjected]" : "")
 394);
 395
 396/*
 397 * Tracepoint for page fault.
 398 */
 399TRACE_EVENT(kvm_page_fault,
 400	TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code),
 401	TP_ARGS(vcpu, fault_address, error_code),
 402
 403	TP_STRUCT__entry(
 404		__field(	unsigned int,	vcpu_id		)
 405		__field(	unsigned long,	guest_rip	)
 406		__field(	u64,		fault_address	)
 407		__field(	u64,		error_code	)
 408	),
 409
 410	TP_fast_assign(
 411		__entry->vcpu_id	= vcpu->vcpu_id;
 412		__entry->guest_rip	= kvm_rip_read(vcpu);
 413		__entry->fault_address	= fault_address;
 414		__entry->error_code	= error_code;
 415	),
 416
 417	TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx",
 418		  __entry->vcpu_id, __entry->guest_rip,
 419		  __entry->fault_address, __entry->error_code)
 420);
 421
 422/*
 423 * Tracepoint for guest MSR access.
 424 */
 425TRACE_EVENT(kvm_msr,
 426	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
 427	TP_ARGS(write, ecx, data, exception),
 428
 429	TP_STRUCT__entry(
 430		__field(	unsigned,	write		)
 431		__field(	u32,		ecx		)
 432		__field(	u64,		data		)
 433		__field(	u8,		exception	)
 434	),
 435
 436	TP_fast_assign(
 437		__entry->write		= write;
 438		__entry->ecx		= ecx;
 439		__entry->data		= data;
 440		__entry->exception	= exception;
 441	),
 442
 443	TP_printk("msr_%s %x = 0x%llx%s",
 444		  __entry->write ? "write" : "read",
 445		  __entry->ecx, __entry->data,
 446		  __entry->exception ? " (#GP)" : "")
 447);
 448
 449#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
 450#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
 451#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
 452#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
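/*
 * Editor's note (illustrative): the wrappers above encode the common
 * cases, e.g. a successful RDMSR is logged as
 *	trace_kvm_msr_read(ecx, data);   ->  "msr_read <ecx> = 0x<data>"
 * while a faulting access uses the *_ex variant and gains a " (#GP)"
 * suffix from TP_printk().
 */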
 453
 454/*
 455 * Tracepoint for guest CR access.
 456 */
 457TRACE_EVENT(kvm_cr,
 458	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
 459	TP_ARGS(rw, cr, val),
 460
 461	TP_STRUCT__entry(
 462		__field(	unsigned int,	rw		)
 463		__field(	unsigned int,	cr		)
 464		__field(	unsigned long,	val		)
 465	),
 466
 467	TP_fast_assign(
 468		__entry->rw		= rw;
 469		__entry->cr		= cr;
 470		__entry->val		= val;
 471	),
 472
 473	TP_printk("cr_%s %x = 0x%lx",
 474		  __entry->rw ? "write" : "read",
 475		  __entry->cr, __entry->val)
 476);
 477
 478#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
 479#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
 480
 481TRACE_EVENT(kvm_pic_set_irq,
 482	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
 483	    TP_ARGS(chip, pin, elcr, imr, coalesced),
 484
 485	TP_STRUCT__entry(
 486		__field(	__u8,		chip		)
 487		__field(	__u8,		pin		)
 488		__field(	__u8,		elcr		)
 489		__field(	__u8,		imr		)
 490		__field(	bool,		coalesced	)
 491	),
 492
 493	TP_fast_assign(
 494		__entry->chip		= chip;
 495		__entry->pin		= pin;
 496		__entry->elcr		= elcr;
 497		__entry->imr		= imr;
 498		__entry->coalesced	= coalesced;
 499	),
 500
 501	TP_printk("chip %u pin %u (%s%s)%s",
 502		  __entry->chip, __entry->pin,
 503		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
 504		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
 505		  __entry->coalesced ? " (coalesced)" : "")
 506);
 507
 508#define kvm_apic_dst_shorthand		\
 509	{0x0, "dst"},			\
 510	{0x1, "self"},			\
 511	{0x2, "all"},			\
 512	{0x3, "all-but-self"}
 513
 514TRACE_EVENT(kvm_apic_ipi,
 515	    TP_PROTO(__u32 icr_low, __u32 dest_id),
 516	    TP_ARGS(icr_low, dest_id),
 517
 518	TP_STRUCT__entry(
 519		__field(	__u32,		icr_low		)
 520		__field(	__u32,		dest_id		)
 521	),
 522
 523	TP_fast_assign(
 524		__entry->icr_low	= icr_low;
 525		__entry->dest_id	= dest_id;
 526	),
 527
 528	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
 529		  __entry->dest_id, (u8)__entry->icr_low,
 530		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
 531				   kvm_deliver_mode),
 532		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
 533		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
 534		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
 535		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
 536				   kvm_apic_dst_shorthand))
 537);
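/*
 * Editor's note (illustrative): the decoding above follows the APIC ICR
 * layout -- bits 7:0 vector, 10:8 delivery mode, 11 logical/physical
 * destination mode, 14 level assert, 15 trigger mode, 19:18 destination
 * shorthand -- which is exactly what the shifts and masks in TP_printk()
 * extract.
 */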
 538
 539TRACE_EVENT(kvm_apic_accept_irq,
 540	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 541	    TP_ARGS(apicid, dm, tm, vec),
 542
 543	TP_STRUCT__entry(
 544		__field(	__u32,		apicid		)
 545		__field(	__u16,		dm		)
 546		__field(	__u16,		tm		)
 547		__field(	__u8,		vec		)
 548	),
 549
 550	TP_fast_assign(
 551		__entry->apicid		= apicid;
 552		__entry->dm		= dm;
 553		__entry->tm		= tm;
 554		__entry->vec		= vec;
 555	),
 556
 557	TP_printk("apicid %x vec %u (%s|%s)",
 558		  __entry->apicid, __entry->vec,
 559		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
 560		  __entry->tm ? "level" : "edge")
 561);
 562
 563TRACE_EVENT(kvm_eoi,
 564	    TP_PROTO(struct kvm_lapic *apic, int vector),
 565	    TP_ARGS(apic, vector),
 566
 567	TP_STRUCT__entry(
 568		__field(	__u32,		apicid		)
 569		__field(	int,		vector		)
 570	),
 571
 572	TP_fast_assign(
 573		__entry->apicid		= apic->vcpu->vcpu_id;
 574		__entry->vector		= vector;
 575	),
 576
 577	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 578);
 579
 580TRACE_EVENT(kvm_pv_eoi,
 581	    TP_PROTO(struct kvm_lapic *apic, int vector),
 582	    TP_ARGS(apic, vector),
 583
 584	TP_STRUCT__entry(
 585		__field(	__u32,		apicid		)
 586		__field(	int,		vector		)
 587	),
 588
 589	TP_fast_assign(
 590		__entry->apicid		= apic->vcpu->vcpu_id;
 591		__entry->vector		= vector;
 592	),
 593
 594	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
 595);
 596
 597/*
 598 * Tracepoint for nested VM-Enter (SVM VMRUN or VMX VM-entry)
 599 */
 600TRACE_EVENT(kvm_nested_vmenter,
 601	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
 602		     __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd,
 603		     __u64 guest_cr3, __u32 isa),
 604	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
 605		    guest_tdp_pgd, guest_cr3, isa),
 606
 607	TP_STRUCT__entry(
 608		__field(	__u64,		rip		)
 609		__field(	__u64,		vmcb		)
 610		__field(	__u64,		nested_rip	)
 611		__field(	__u32,		int_ctl		)
 612		__field(	__u32,		event_inj	)
 613		__field(	bool,		tdp_enabled	)
 614		__field(	__u64,		guest_pgd	)
 615		__field(	__u32,		isa		)
 616	),
 617
 618	TP_fast_assign(
 619		__entry->rip		= rip;
 620		__entry->vmcb		= vmcb;
 621		__entry->nested_rip	= nested_rip;
 622		__entry->int_ctl	= int_ctl;
 623		__entry->event_inj	= event_inj;
 624		__entry->tdp_enabled	= tdp_enabled;
 625		__entry->guest_pgd	= tdp_enabled ? guest_tdp_pgd : guest_cr3;
 626		__entry->isa		= isa;
 627	),
 628
 629	TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
 630		  "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx",
 631		  __entry->rip,
 632		  __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
 633		  __entry->vmcb,
 634		  __entry->nested_rip,
 635		  __entry->int_ctl,
 636		  __entry->event_inj,
 637		  __entry->isa == KVM_ISA_VMX ? "ept" : "npt",
 638		  __entry->tdp_enabled ? "y" : "n",
 639		  !__entry->tdp_enabled ? "guest_cr3" :
 640		  __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3",
 641		  __entry->guest_pgd)
 642);
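/*
 * Editor's note: 'guest_pgd' is overloaded -- with TDP enabled it holds
 * the nested EPT pointer (VMX) or nested CR3 (SVM), otherwise it holds
 * the L2 guest's CR3 -- and TP_printk() picks the matching label.
 */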
 643
 644TRACE_EVENT(kvm_nested_intercepts,
 645	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
 646		     __u32 intercept1, __u32 intercept2, __u32 intercept3),
 647	    TP_ARGS(cr_read, cr_write, exceptions, intercept1,
 648		    intercept2, intercept3),
 649
 650	TP_STRUCT__entry(
 651		__field(	__u16,		cr_read		)
 652		__field(	__u16,		cr_write	)
 653		__field(	__u32,		exceptions	)
 654		__field(	__u32,		intercept1	)
 655		__field(	__u32,		intercept2	)
 656		__field(	__u32,		intercept3	)
 657	),
 658
 659	TP_fast_assign(
 660		__entry->cr_read	= cr_read;
 661		__entry->cr_write	= cr_write;
 662		__entry->exceptions	= exceptions;
 663		__entry->intercept1	= intercept1;
 664		__entry->intercept2	= intercept2;
 665		__entry->intercept3	= intercept3;
 666	),
 667
 668	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
 669		  "intercepts: %08x %08x %08x",
 670		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
 671		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
 672);
 673/*
 674 * Tracepoint for #VMEXIT while nested
 675 */
 676TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);
 677
 678/*
 679 * Tracepoint for #VMEXIT reinjected to the guest
 680 */
 681TRACE_EVENT(kvm_nested_vmexit_inject,
 682	    TP_PROTO(__u32 exit_code,
 683		     __u64 exit_info1, __u64 exit_info2,
 684		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
 685	    TP_ARGS(exit_code, exit_info1, exit_info2,
 686		    exit_int_info, exit_int_info_err, isa),
 687
 688	TP_STRUCT__entry(
 689		__field(	__u32,		exit_code		)
 690		__field(	__u64,		exit_info1		)
 691		__field(	__u64,		exit_info2		)
 692		__field(	__u32,		exit_int_info		)
 693		__field(	__u32,		exit_int_info_err	)
 694		__field(	__u32,		isa			)
 695	),
 696
 697	TP_fast_assign(
 698		__entry->exit_code		= exit_code;
 699		__entry->exit_info1		= exit_info1;
 700		__entry->exit_info2		= exit_info2;
 701		__entry->exit_int_info		= exit_int_info;
 702		__entry->exit_int_info_err	= exit_int_info_err;
 703		__entry->isa			= isa;
 704	),
 705
 706	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
 707		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
 708		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
 709		  __entry->exit_info1, __entry->exit_info2,
 710		  __entry->exit_int_info, __entry->exit_int_info_err)
 711);
 712
 713/*
 714 * Tracepoint for nested #vmexit because of interrupt pending
 715 */
 716TRACE_EVENT(kvm_nested_intr_vmexit,
 717	    TP_PROTO(__u64 rip),
 718	    TP_ARGS(rip),
 719
 720	TP_STRUCT__entry(
 721		__field(	__u64,	rip	)
 722	),
 723
 724	TP_fast_assign(
 725		__entry->rip	=	rip
 726	),
 727
 728	TP_printk("rip: 0x%016llx", __entry->rip)
 729);
 730
 731/*
 732 * Tracepoint for INVLPGA instruction interception
 733 */
 734TRACE_EVENT(kvm_invlpga,
 735	    TP_PROTO(__u64 rip, int asid, u64 address),
 736	    TP_ARGS(rip, asid, address),
 737
 738	TP_STRUCT__entry(
 739		__field(	__u64,	rip	)
 740		__field(	int,	asid	)
 741		__field(	__u64,	address	)
 742	),
 743
 744	TP_fast_assign(
 745		__entry->rip		=	rip;
 746		__entry->asid		=	asid;
 747		__entry->address	=	address;
 748	),
 749
 750	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
 751		  __entry->rip, __entry->asid, __entry->address)
 752);
 753
 754/*
 755 * Tracepoint for SKINIT instruction interception
 756 */
 757TRACE_EVENT(kvm_skinit,
 758	    TP_PROTO(__u64 rip, __u32 slb),
 759	    TP_ARGS(rip, slb),
 760
 761	TP_STRUCT__entry(
 762		__field(	__u64,	rip	)
 763		__field(	__u32,	slb	)
 764	),
 765
 766	TP_fast_assign(
 767		__entry->rip		=	rip;
 768		__entry->slb		=	slb;
 769	),
 770
 771	TP_printk("rip: 0x%016llx slb: 0x%08x",
 772		  __entry->rip, __entry->slb)
 773);
 774
 775#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
 776#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
 777#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
 778#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
 779
 780#define kvm_trace_symbol_emul_flags	                  \
 781	{ 0,   			    "real" },		  \
 782	{ KVM_EMUL_INSN_F_CR0_PE			  \
 783	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
 784	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
 785	{ KVM_EMUL_INSN_F_CR0_PE			  \
 786	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
 787	{ KVM_EMUL_INSN_F_CR0_PE			  \
 788	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }
 789
 790#define kei_decode_mode(mode) ({			\
 791	u8 flags = 0xff;				\
 792	switch (mode) {					\
 793	case X86EMUL_MODE_REAL:				\
 794		flags = 0;				\
 795		break;					\
 796	case X86EMUL_MODE_VM86:				\
 797		flags = KVM_EMUL_INSN_F_EFL_VM;		\
 798		break;					\
 799	case X86EMUL_MODE_PROT16:			\
 800		flags = KVM_EMUL_INSN_F_CR0_PE;		\
 801		break;					\
 802	case X86EMUL_MODE_PROT32:			\
 803		flags = KVM_EMUL_INSN_F_CR0_PE		\
 804			| KVM_EMUL_INSN_F_CS_D;		\
 805		break;					\
 806	case X86EMUL_MODE_PROT64:			\
 807		flags = KVM_EMUL_INSN_F_CR0_PE		\
 808			| KVM_EMUL_INSN_F_CS_L;		\
 809		break;					\
 810	}						\
 811	flags;						\
 812	})
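/*
 * Editor's note (worked example): kei_decode_mode(X86EMUL_MODE_PROT64)
 * yields KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L, which
 * kvm_trace_symbol_emul_flags maps to the string "prot64" in the
 * kvm_emulate_insn output below.
 */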
 813
 814TRACE_EVENT(kvm_emulate_insn,
 815	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
 816	TP_ARGS(vcpu, failed),
 817
 818	TP_STRUCT__entry(
 819		__field(    __u64, rip                       )
 820		__field(    __u32, csbase                    )
 821		__field(    __u8,  len                       )
 822		__array(    __u8,  insn,    15	             )
 823		__field(    __u8,  flags       	   	     )
 824		__field(    __u8,  failed                    )
 825		),
 826
 827	TP_fast_assign(
 828		__entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
 829		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
 830			       - vcpu->arch.emulate_ctxt->fetch.data;
 831		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
 832		memcpy(__entry->insn,
 833		       vcpu->arch.emulate_ctxt->fetch.data,
 834		       15);
 835		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
 836		__entry->failed = failed;
 837		),
 838
 839	TP_printk("%x:%llx:%s (%s)%s",
 840		  __entry->csbase, __entry->rip,
 841		  __print_hex(__entry->insn, __entry->len),
 842		  __print_symbolic(__entry->flags,
 843				   kvm_trace_symbol_emul_flags),
 844		  __entry->failed ? " failed" : ""
 845		)
 846	);
 847
 848#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
 849#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
 850
 851TRACE_EVENT(
 852	vcpu_match_mmio,
 853	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
 854	TP_ARGS(gva, gpa, write, gpa_match),
 855
 856	TP_STRUCT__entry(
 857		__field(gva_t, gva)
 858		__field(gpa_t, gpa)
 859		__field(bool, write)
 860		__field(bool, gpa_match)
 861		),
 862
 863	TP_fast_assign(
 864		__entry->gva = gva;
 865		__entry->gpa = gpa;
 866		__entry->write = write;
 867		__entry->gpa_match = gpa_match
 868		),
 869
 870	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
 871		  __entry->write ? "Write" : "Read",
 872		  __entry->gpa_match ? "GPA" : "GVA")
 873);
 874
 875TRACE_EVENT(kvm_write_tsc_offset,
 876	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
 877		 __u64 next_tsc_offset),
 878	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
 879
 880	TP_STRUCT__entry(
 881		__field( unsigned int,	vcpu_id				)
 882		__field(	__u64,	previous_tsc_offset		)
 883		__field(	__u64,	next_tsc_offset			)
 884	),
 885
 886	TP_fast_assign(
 887		__entry->vcpu_id		= vcpu_id;
 888		__entry->previous_tsc_offset	= previous_tsc_offset;
 889		__entry->next_tsc_offset	= next_tsc_offset;
 890	),
 891
 892	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
 893		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
 894);
 895
 896#ifdef CONFIG_X86_64
 897
 898#define host_clocks					\
 899	{VDSO_CLOCKMODE_NONE, "none"},			\
 900	{VDSO_CLOCKMODE_TSC,  "tsc"}			\
 901
 902TRACE_EVENT(kvm_update_master_clock,
 903	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
 904	TP_ARGS(use_master_clock, host_clock, offset_matched),
 905
 906	TP_STRUCT__entry(
 907		__field(		bool,	use_master_clock	)
 908		__field(	unsigned int,	host_clock		)
 909		__field(		bool,	offset_matched		)
 910	),
 911
 912	TP_fast_assign(
 913		__entry->use_master_clock	= use_master_clock;
 914		__entry->host_clock		= host_clock;
 915		__entry->offset_matched		= offset_matched;
 916	),
 917
 918	TP_printk("masterclock %d hostclock %s offsetmatched %u",
 919		  __entry->use_master_clock,
 920		  __print_symbolic(__entry->host_clock, host_clocks),
 921		  __entry->offset_matched)
 922);
 923
 924TRACE_EVENT(kvm_track_tsc,
 925	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
 926		 unsigned int online_vcpus, bool use_master_clock,
 927		 unsigned int host_clock),
 928	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
 929		host_clock),
 930
 931	TP_STRUCT__entry(
 932		__field(	unsigned int,	vcpu_id			)
 933		__field(	unsigned int,	nr_vcpus_matched_tsc	)
 934		__field(	unsigned int,	online_vcpus		)
 935		__field(	bool,		use_master_clock	)
 936		__field(	unsigned int,	host_clock		)
 937	),
 938
 939	TP_fast_assign(
 940		__entry->vcpu_id		= vcpu_id;
 941		__entry->nr_vcpus_matched_tsc	= nr_matched;
 942		__entry->online_vcpus		= online_vcpus;
 943		__entry->use_master_clock	= use_master_clock;
 944		__entry->host_clock		= host_clock;
 945	),
 946
 947	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
 948		  " hostclock %s",
 949		  __entry->vcpu_id, __entry->use_master_clock,
 950		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
 951		  __print_symbolic(__entry->host_clock, host_clocks))
 952);
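/*
 * Editor's note (hedged): taken together, these two events show why the
 * masterclock is or is not in use -- kvm_track_tsc records how many of
 * the online vCPUs currently have a matched TSC, and
 * kvm_update_master_clock records whether the masterclock is enabled
 * along with the host clocksource (see host_clocks) and whether the
 * offsets matched.
 */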
 953
 954#endif /* CONFIG_X86_64 */
 955
 956/*
 957 * Tracepoint for PML full VMEXIT.
 958 */
 959TRACE_EVENT(kvm_pml_full,
 960	TP_PROTO(unsigned int vcpu_id),
 961	TP_ARGS(vcpu_id),
 962
 963	TP_STRUCT__entry(
 964		__field(	unsigned int,	vcpu_id			)
 965	),
 966
 967	TP_fast_assign(
 968		__entry->vcpu_id		= vcpu_id;
 969	),
 970
 971	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
 972);
 973
 974TRACE_EVENT(kvm_ple_window_update,
 975	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
 976	TP_ARGS(vcpu_id, new, old),
 977
 978	TP_STRUCT__entry(
 979		__field(        unsigned int,   vcpu_id         )
 980		__field(        unsigned int,       new         )
 981		__field(        unsigned int,       old         )
 982	),
 983
 984	TP_fast_assign(
 985		__entry->vcpu_id        = vcpu_id;
 986		__entry->new            = new;
 987		__entry->old            = old;
 988	),
 989
 990	TP_printk("vcpu %u old %u new %u (%s)",
 991	          __entry->vcpu_id, __entry->old, __entry->new,
 992		  __entry->old < __entry->new ? "grew" : "shrank")
 993);
 994
 995TRACE_EVENT(kvm_pvclock_update,
 996	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
 997	TP_ARGS(vcpu_id, pvclock),
 998
 999	TP_STRUCT__entry(
1000		__field(	unsigned int,	vcpu_id			)
1001		__field(	__u32,		version			)
1002		__field(	__u64,		tsc_timestamp		)
1003		__field(	__u64,		system_time		)
1004		__field(	__u32,		tsc_to_system_mul	)
1005		__field(	__s8,		tsc_shift		)
1006		__field(	__u8,		flags			)
1007	),
1008
1009	TP_fast_assign(
1010		__entry->vcpu_id	   = vcpu_id;
1011		__entry->version	   = pvclock->version;
1012		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
1013		__entry->system_time	   = pvclock->system_time;
1014		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
1015		__entry->tsc_shift	   = pvclock->tsc_shift;
1016		__entry->flags		   = pvclock->flags;
1017	),
1018
1019	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
1020		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
1021		  "flags 0x%x }",
1022		  __entry->vcpu_id,
1023		  __entry->version,
1024		  __entry->tsc_timestamp,
1025		  __entry->system_time,
1026		  __entry->tsc_to_system_mul,
1027		  __entry->tsc_shift,
1028		  __entry->flags)
1029);
1030
1031TRACE_EVENT(kvm_wait_lapic_expire,
1032	TP_PROTO(unsigned int vcpu_id, s64 delta),
1033	TP_ARGS(vcpu_id, delta),
1034
1035	TP_STRUCT__entry(
1036		__field(	unsigned int,	vcpu_id		)
1037		__field(	s64,		delta		)
1038	),
1039
1040	TP_fast_assign(
1041		__entry->vcpu_id	   = vcpu_id;
1042		__entry->delta             = delta;
1043	),
1044
1045	TP_printk("vcpu %u: delta %lld (%s)",
1046		  __entry->vcpu_id,
1047		  __entry->delta,
1048		  __entry->delta < 0 ? "early" : "late")
1049);
1050
1051TRACE_EVENT(kvm_smm_transition,
1052	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
1053	TP_ARGS(vcpu_id, smbase, entering),
1054
1055	TP_STRUCT__entry(
1056		__field(	unsigned int,	vcpu_id		)
1057		__field(	u64,		smbase		)
1058		__field(	bool,		entering	)
1059	),
1060
1061	TP_fast_assign(
1062		__entry->vcpu_id	= vcpu_id;
1063		__entry->smbase		= smbase;
1064		__entry->entering	= entering;
1065	),
1066
1067	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
1068		  __entry->vcpu_id,
1069		  __entry->entering ? "entering" : "leaving",
1070		  __entry->smbase)
1071);
1072
1073/*
1074 * Tracepoint for VT-d posted-interrupts.
1075 */
1076TRACE_EVENT(kvm_pi_irte_update,
1077	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
1078		 unsigned int gsi, unsigned int gvec,
1079		 u64 pi_desc_addr, bool set),
1080	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
1081
1082	TP_STRUCT__entry(
1083		__field(	unsigned int,	host_irq	)
1084		__field(	unsigned int,	vcpu_id		)
1085		__field(	unsigned int,	gsi		)
1086		__field(	unsigned int,	gvec		)
1087		__field(	u64,		pi_desc_addr	)
1088		__field(	bool,		set		)
1089	),
1090
1091	TP_fast_assign(
1092		__entry->host_irq	= host_irq;
1093		__entry->vcpu_id	= vcpu_id;
1094		__entry->gsi		= gsi;
1095		__entry->gvec		= gvec;
1096		__entry->pi_desc_addr	= pi_desc_addr;
1097		__entry->set		= set;
1098	),
1099
1100	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
1101		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
1102		  __entry->set ? "enabled and being updated" : "disabled",
1103		  __entry->host_irq,
1104		  __entry->vcpu_id,
1105		  __entry->gsi,
1106		  __entry->gvec,
1107		  __entry->pi_desc_addr)
1108);
1109
1110/*
1111 * Tracepoint for kvm_hv_notify_acked_sint.
1112 */
1113TRACE_EVENT(kvm_hv_notify_acked_sint,
1114	TP_PROTO(int vcpu_id, u32 sint),
1115	TP_ARGS(vcpu_id, sint),
1116
1117	TP_STRUCT__entry(
1118		__field(int, vcpu_id)
1119		__field(u32, sint)
1120	),
1121
1122	TP_fast_assign(
1123		__entry->vcpu_id = vcpu_id;
1124		__entry->sint = sint;
1125	),
1126
1127	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
1128);
1129
1130/*
1131 * Tracepoint for synic_set_irq.
1132 */
1133TRACE_EVENT(kvm_hv_synic_set_irq,
1134	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
1135	TP_ARGS(vcpu_id, sint, vector, ret),
1136
1137	TP_STRUCT__entry(
1138		__field(int, vcpu_id)
1139		__field(u32, sint)
1140		__field(int, vector)
1141		__field(int, ret)
1142	),
1143
1144	TP_fast_assign(
1145		__entry->vcpu_id = vcpu_id;
1146		__entry->sint = sint;
1147		__entry->vector = vector;
1148		__entry->ret = ret;
1149	),
1150
1151	TP_printk("vcpu_id %d sint %u vector %d ret %d",
1152		  __entry->vcpu_id, __entry->sint, __entry->vector,
1153		  __entry->ret)
1154);
1155
1156/*
1157 * Tracepoint for kvm_hv_synic_send_eoi.
1158 */
1159TRACE_EVENT(kvm_hv_synic_send_eoi,
1160	TP_PROTO(int vcpu_id, int vector),
1161	TP_ARGS(vcpu_id, vector),
1162
1163	TP_STRUCT__entry(
1164		__field(int, vcpu_id)
1165		__field(u32, sint)
1166		__field(int, vector)
1167		__field(int, ret)
1168	),
1169
1170	TP_fast_assign(
1171		__entry->vcpu_id = vcpu_id;
1172		__entry->vector	= vector;
1173	),
1174
1175	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
1176);
1177
1178/*
1179 * Tracepoint for synic_set_msr.
1180 */
1181TRACE_EVENT(kvm_hv_synic_set_msr,
1182	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
1183	TP_ARGS(vcpu_id, msr, data, host),
1184
1185	TP_STRUCT__entry(
1186		__field(int, vcpu_id)
1187		__field(u32, msr)
1188		__field(u64, data)
1189		__field(bool, host)
1190	),
1191
1192	TP_fast_assign(
1193		__entry->vcpu_id = vcpu_id;
1194		__entry->msr = msr;
1195		__entry->data = data;
1196		__entry->host = host
1197	),
1198
1199	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
1200		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
1201);
1202
1203/*
1204 * Tracepoint for stimer_set_config.
1205 */
1206TRACE_EVENT(kvm_hv_stimer_set_config,
1207	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
1208	TP_ARGS(vcpu_id, timer_index, config, host),
1209
1210	TP_STRUCT__entry(
1211		__field(int, vcpu_id)
1212		__field(int, timer_index)
1213		__field(u64, config)
1214		__field(bool, host)
1215	),
1216
1217	TP_fast_assign(
1218		__entry->vcpu_id = vcpu_id;
1219		__entry->timer_index = timer_index;
1220		__entry->config = config;
1221		__entry->host = host;
1222	),
1223
1224	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
1225		  __entry->vcpu_id, __entry->timer_index, __entry->config,
1226		  __entry->host)
1227);
1228
1229/*
1230 * Tracepoint for stimer_set_count.
1231 */
1232TRACE_EVENT(kvm_hv_stimer_set_count,
1233	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
1234	TP_ARGS(vcpu_id, timer_index, count, host),
1235
1236	TP_STRUCT__entry(
1237		__field(int, vcpu_id)
1238		__field(int, timer_index)
1239		__field(u64, count)
1240		__field(bool, host)
1241	),
1242
1243	TP_fast_assign(
1244		__entry->vcpu_id = vcpu_id;
1245		__entry->timer_index = timer_index;
1246		__entry->count = count;
1247		__entry->host = host;
1248	),
1249
1250	TP_printk("vcpu_id %d timer %d count %llu host %d",
1251		  __entry->vcpu_id, __entry->timer_index, __entry->count,
1252		  __entry->host)
1253);
1254
1255/*
1256 * Tracepoint for stimer_start(periodic timer case).
1257 */
1258TRACE_EVENT(kvm_hv_stimer_start_periodic,
1259	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
1260	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),
1261
1262	TP_STRUCT__entry(
1263		__field(int, vcpu_id)
1264		__field(int, timer_index)
1265		__field(u64, time_now)
1266		__field(u64, exp_time)
1267	),
1268
1269	TP_fast_assign(
1270		__entry->vcpu_id = vcpu_id;
1271		__entry->timer_index = timer_index;
1272		__entry->time_now = time_now;
1273		__entry->exp_time = exp_time;
1274	),
1275
1276	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
1277		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1278		  __entry->exp_time)
1279);
1280
1281/*
1282 * Tracepoint for stimer_start(one-shot timer case).
1283 */
1284TRACE_EVENT(kvm_hv_stimer_start_one_shot,
1285	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
1286	TP_ARGS(vcpu_id, timer_index, time_now, count),
1287
1288	TP_STRUCT__entry(
1289		__field(int, vcpu_id)
1290		__field(int, timer_index)
1291		__field(u64, time_now)
1292		__field(u64, count)
1293	),
1294
1295	TP_fast_assign(
1296		__entry->vcpu_id = vcpu_id;
1297		__entry->timer_index = timer_index;
1298		__entry->time_now = time_now;
1299		__entry->count = count;
1300	),
1301
1302	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
1303		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
1304		  __entry->count)
1305);
1306
1307/*
1308 * Tracepoint for stimer_timer_callback.
1309 */
1310TRACE_EVENT(kvm_hv_stimer_callback,
1311	TP_PROTO(int vcpu_id, int timer_index),
1312	TP_ARGS(vcpu_id, timer_index),
1313
1314	TP_STRUCT__entry(
1315		__field(int, vcpu_id)
1316		__field(int, timer_index)
1317	),
1318
1319	TP_fast_assign(
1320		__entry->vcpu_id = vcpu_id;
1321		__entry->timer_index = timer_index;
1322	),
1323
1324	TP_printk("vcpu_id %d timer %d",
1325		  __entry->vcpu_id, __entry->timer_index)
1326);
1327
1328/*
1329 * Tracepoint for stimer_expiration.
1330 */
1331TRACE_EVENT(kvm_hv_stimer_expiration,
1332	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
1333	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
1334
1335	TP_STRUCT__entry(
1336		__field(int, vcpu_id)
1337		__field(int, timer_index)
1338		__field(int, direct)
1339		__field(int, msg_send_result)
1340	),
1341
1342	TP_fast_assign(
1343		__entry->vcpu_id = vcpu_id;
1344		__entry->timer_index = timer_index;
1345		__entry->direct = direct;
1346		__entry->msg_send_result = msg_send_result;
1347	),
1348
1349	TP_printk("vcpu_id %d timer %d direct %d send result %d",
1350		  __entry->vcpu_id, __entry->timer_index,
1351		  __entry->direct, __entry->msg_send_result)
1352);
1353
1354/*
1355 * Tracepoint for stimer_cleanup.
1356 */
1357TRACE_EVENT(kvm_hv_stimer_cleanup,
1358	TP_PROTO(int vcpu_id, int timer_index),
1359	TP_ARGS(vcpu_id, timer_index),
1360
1361	TP_STRUCT__entry(
1362		__field(int, vcpu_id)
1363		__field(int, timer_index)
1364	),
1365
1366	TP_fast_assign(
1367		__entry->vcpu_id = vcpu_id;
1368		__entry->timer_index = timer_index;
1369	),
1370
1371	TP_printk("vcpu_id %d timer %d",
1372		  __entry->vcpu_id, __entry->timer_index)
1373);
1374
1375TRACE_EVENT(kvm_apicv_inhibit_changed,
1376	    TP_PROTO(int reason, bool set, unsigned long inhibits),
1377	    TP_ARGS(reason, set, inhibits),
1378
1379	TP_STRUCT__entry(
1380		__field(int, reason)
1381		__field(bool, set)
1382		__field(unsigned long, inhibits)
1383	),
1384
1385	TP_fast_assign(
1386		__entry->reason = reason;
1387		__entry->set = set;
1388		__entry->inhibits = inhibits;
1389	),
1390
1391	TP_printk("%s reason=%u, inhibits=0x%lx",
1392		  __entry->set ? "set" : "cleared",
1393		  __entry->reason, __entry->inhibits)
1394);
1395
1396TRACE_EVENT(kvm_apicv_accept_irq,
1397	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
1398	    TP_ARGS(apicid, dm, tm, vec),
1399
1400	TP_STRUCT__entry(
1401		__field(	__u32,		apicid		)
1402		__field(	__u16,		dm		)
1403		__field(	__u16,		tm		)
1404		__field(	__u8,		vec		)
1405	),
1406
1407	TP_fast_assign(
1408		__entry->apicid		= apicid;
1409		__entry->dm		= dm;
1410		__entry->tm		= tm;
1411		__entry->vec		= vec;
1412	),
1413
1414	TP_printk("apicid %x vec %u (%s|%s)",
1415		  __entry->apicid, __entry->vec,
1416		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
1417		  __entry->tm ? "level" : "edge")
1418);
1419
1420/*
1421 * Tracepoint for AMD AVIC
1422 */
1423TRACE_EVENT(kvm_avic_incomplete_ipi,
1424	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
1425	    TP_ARGS(vcpu, icrh, icrl, id, index),
1426
1427	TP_STRUCT__entry(
1428		__field(u32, vcpu)
1429		__field(u32, icrh)
1430		__field(u32, icrl)
1431		__field(u32, id)
1432		__field(u32, index)
1433	),
1434
1435	TP_fast_assign(
1436		__entry->vcpu = vcpu;
1437		__entry->icrh = icrh;
1438		__entry->icrl = icrl;
1439		__entry->id = id;
1440		__entry->index = index;
1441	),
1442
1443	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
1444		  __entry->vcpu, __entry->icrh, __entry->icrl,
1445		  __entry->id, __entry->index)
1446);
1447
1448TRACE_EVENT(kvm_avic_unaccelerated_access,
1449	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
1450	    TP_ARGS(vcpu, offset, ft, rw, vec),
1451
1452	TP_STRUCT__entry(
1453		__field(u32, vcpu)
1454		__field(u32, offset)
1455		__field(bool, ft)
1456		__field(bool, rw)
1457		__field(u32, vec)
1458	),
1459
1460	TP_fast_assign(
1461		__entry->vcpu = vcpu;
1462		__entry->offset = offset;
1463		__entry->ft = ft;
1464		__entry->rw = rw;
1465		__entry->vec = vec;
1466	),
1467
1468	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
1469		  __entry->vcpu,
1470		  __entry->offset,
1471		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
1472		  __entry->ft ? "trap" : "fault",
1473		  __entry->rw ? "write" : "read",
1474		  __entry->vec)
1475);
1476
1477TRACE_EVENT(kvm_avic_ga_log,
1478	    TP_PROTO(u32 vmid, u32 vcpuid),
1479	    TP_ARGS(vmid, vcpuid),
1480
1481	TP_STRUCT__entry(
1482		__field(u32, vmid)
1483		__field(u32, vcpuid)
1484	),
1485
1486	TP_fast_assign(
1487		__entry->vmid = vmid;
1488		__entry->vcpuid = vcpuid;
1489	),
1490
1491	TP_printk("vmid=%u, vcpuid=%u",
1492		  __entry->vmid, __entry->vcpuid)
1493);
1494
1495TRACE_EVENT(kvm_avic_kick_vcpu_slowpath,
1496	    TP_PROTO(u32 icrh, u32 icrl, u32 index),
1497	    TP_ARGS(icrh, icrl, index),
1498
1499	TP_STRUCT__entry(
1500		__field(u32, icrh)
1501		__field(u32, icrl)
1502		__field(u32, index)
1503	),
1504
1505	TP_fast_assign(
1506		__entry->icrh = icrh;
1507		__entry->icrl = icrl;
1508		__entry->index = index;
1509	),
1510
1511	TP_printk("icrh:icrl=%#08x:%08x, index=%u",
1512		  __entry->icrh, __entry->icrl, __entry->index)
1513);
1514
1515TRACE_EVENT(kvm_avic_doorbell,
1516	    TP_PROTO(u32 vcpuid, u32 apicid),
1517	    TP_ARGS(vcpuid, apicid),
1518
1519	TP_STRUCT__entry(
1520		__field(u32, vcpuid)
1521		__field(u32, apicid)
1522	),
1523
1524	TP_fast_assign(
1525		__entry->vcpuid = vcpuid;
1526		__entry->apicid = apicid;
1527	),
1528
1529	TP_printk("vcpuid=%u, apicid=%u",
1530		  __entry->vcpuid, __entry->apicid)
1531);
1532
1533TRACE_EVENT(kvm_hv_timer_state,
1534		TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
1535		TP_ARGS(vcpu_id, hv_timer_in_use),
1536		TP_STRUCT__entry(
1537			__field(unsigned int, vcpu_id)
1538			__field(unsigned int, hv_timer_in_use)
1539			),
1540		TP_fast_assign(
1541			__entry->vcpu_id = vcpu_id;
1542			__entry->hv_timer_in_use = hv_timer_in_use;
1543			),
1544		TP_printk("vcpu_id %x hv_timer %x",
1545			__entry->vcpu_id,
1546			__entry->hv_timer_in_use)
1547);
1548
1549/*
1550 * Tracepoint for kvm_hv_flush_tlb.
1551 */
1552TRACE_EVENT(kvm_hv_flush_tlb,
1553	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode),
1554	TP_ARGS(processor_mask, address_space, flags, guest_mode),
1555
1556	TP_STRUCT__entry(
1557		__field(u64, processor_mask)
1558		__field(u64, address_space)
1559		__field(u64, flags)
1560		__field(bool, guest_mode)
1561	),
1562
1563	TP_fast_assign(
1564		__entry->processor_mask = processor_mask;
1565		__entry->address_space = address_space;
1566		__entry->flags = flags;
1567		__entry->guest_mode = guest_mode;
1568	),
1569
1570	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s",
1571		  __entry->processor_mask, __entry->address_space,
1572		  __entry->flags, __entry->guest_mode ? "(L2)" : "")
1573);
1574
1575/*
1576 * Tracepoint for kvm_hv_flush_tlb_ex.
1577 */
1578TRACE_EVENT(kvm_hv_flush_tlb_ex,
1579	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode),
1580	TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode),
1581
1582	TP_STRUCT__entry(
1583		__field(u64, valid_bank_mask)
1584		__field(u64, format)
1585		__field(u64, address_space)
1586		__field(u64, flags)
1587		__field(bool, guest_mode)
1588	),
1589
1590	TP_fast_assign(
1591		__entry->valid_bank_mask = valid_bank_mask;
1592		__entry->format = format;
1593		__entry->address_space = address_space;
1594		__entry->flags = flags;
1595		__entry->guest_mode = guest_mode;
1596	),
1597
1598	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
1599		  "address_space 0x%llx flags 0x%llx %s",
1600		  __entry->valid_bank_mask, __entry->format,
1601		  __entry->address_space, __entry->flags,
1602		  __entry->guest_mode ? "(L2)" : "")
1603);
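/*
 * Editor's note: the trailing "(L2)" marker in both TLB-flush events
 * above indicates the Hyper-V flush hypercall was issued while the vCPU
 * was running a nested (L2) guest, as captured in 'guest_mode'.
 */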
1604
1605/*
1606 * Tracepoints for kvm_hv_send_ipi.
1607 */
1608TRACE_EVENT(kvm_hv_send_ipi,
1609	TP_PROTO(u32 vector, u64 processor_mask),
1610	TP_ARGS(vector, processor_mask),
1611
1612	TP_STRUCT__entry(
1613		__field(u32, vector)
1614		__field(u64, processor_mask)
1615	),
1616
1617	TP_fast_assign(
1618		__entry->vector = vector;
1619		__entry->processor_mask = processor_mask;
1620	),
1621
1622	TP_printk("vector %x processor_mask 0x%llx",
1623		  __entry->vector, __entry->processor_mask)
1624);
1625
1626TRACE_EVENT(kvm_hv_send_ipi_ex,
1627	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
1628	TP_ARGS(vector, format, valid_bank_mask),
1629
1630	TP_STRUCT__entry(
1631		__field(u32, vector)
1632		__field(u64, format)
1633		__field(u64, valid_bank_mask)
1634	),
1635
1636	TP_fast_assign(
1637		__entry->vector = vector;
1638		__entry->format = format;
1639		__entry->valid_bank_mask = valid_bank_mask;
1640	),
1641
1642	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
1643		  __entry->vector, __entry->format,
1644		  __entry->valid_bank_mask)
1645);
1646
1647TRACE_EVENT(kvm_pv_tlb_flush,
1648	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
1649	TP_ARGS(vcpu_id, need_flush_tlb),
1650
1651	TP_STRUCT__entry(
1652		__field(	unsigned int,	vcpu_id		)
1653		__field(	bool,	need_flush_tlb		)
1654	),
1655
1656	TP_fast_assign(
1657		__entry->vcpu_id	= vcpu_id;
1658		__entry->need_flush_tlb = need_flush_tlb;
1659	),
1660
1661	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
1662		__entry->need_flush_tlb ? "true" : "false")
1663);
1664
1665/*
1666 * Tracepoint for failed nested VMX VM-Enter.
1667 */
1668TRACE_EVENT(kvm_nested_vmenter_failed,
1669	TP_PROTO(const char *msg, u32 err),
1670	TP_ARGS(msg, err),
1671
1672	TP_STRUCT__entry(
1673		__string(msg, msg)
1674		__field(u32, err)
1675	),
1676
1677	TP_fast_assign(
1678		__assign_str(msg, msg);
1679		__entry->err = err;
1680	),
1681
1682	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
1683		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
1684);
1685
1686/*
1687 * Tracepoint for syndbg_set_msr.
1688 */
1689TRACE_EVENT(kvm_hv_syndbg_set_msr,
1690	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1691	TP_ARGS(vcpu_id, vp_index, msr, data),
1692
1693	TP_STRUCT__entry(
1694		__field(int, vcpu_id)
1695		__field(u32, vp_index)
1696		__field(u32, msr)
1697		__field(u64, data)
1698	),
1699
1700	TP_fast_assign(
1701		__entry->vcpu_id = vcpu_id;
1702		__entry->vp_index = vp_index;
1703		__entry->msr = msr;
1704		__entry->data = data;
1705	),
1706
1707	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1708		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1709		  __entry->data)
1710);
1711
1712/*
1713 * Tracepoint for syndbg_get_msr.
1714 */
1715TRACE_EVENT(kvm_hv_syndbg_get_msr,
1716	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
1717	TP_ARGS(vcpu_id, vp_index, msr, data),
1718
1719	TP_STRUCT__entry(
1720		__field(int, vcpu_id)
1721		__field(u32, vp_index)
1722		__field(u32, msr)
1723		__field(u64, data)
1724	),
1725
1726	TP_fast_assign(
1727		__entry->vcpu_id = vcpu_id;
1728		__entry->vp_index = vp_index;
1729		__entry->msr = msr;
1730		__entry->data = data;
1731	),
1732
1733	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
1734		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
1735		  __entry->data)
1736);
1737
1738/*
1739 * Tracepoint for the start of VMGEXIT processing
1740 */
1741TRACE_EVENT(kvm_vmgexit_enter,
1742	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1743	TP_ARGS(vcpu_id, ghcb),
1744
1745	TP_STRUCT__entry(
1746		__field(unsigned int, vcpu_id)
1747		__field(u64, exit_reason)
1748		__field(u64, info1)
1749		__field(u64, info2)
1750	),
1751
1752	TP_fast_assign(
1753		__entry->vcpu_id     = vcpu_id;
1754		__entry->exit_reason = ghcb->save.sw_exit_code;
1755		__entry->info1       = ghcb->save.sw_exit_info_1;
1756		__entry->info2       = ghcb->save.sw_exit_info_2;
1757	),
1758
1759	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1760		  __entry->vcpu_id, __entry->exit_reason,
1761		  __entry->info1, __entry->info2)
1762);
1763
1764/*
1765 * Tracepoint for the end of VMGEXIT processing
1766 */
1767TRACE_EVENT(kvm_vmgexit_exit,
1768	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
1769	TP_ARGS(vcpu_id, ghcb),
1770
1771	TP_STRUCT__entry(
1772		__field(unsigned int, vcpu_id)
1773		__field(u64, exit_reason)
1774		__field(u64, info1)
1775		__field(u64, info2)
1776	),
1777
1778	TP_fast_assign(
1779		__entry->vcpu_id     = vcpu_id;
1780		__entry->exit_reason = ghcb->save.sw_exit_code;
1781		__entry->info1       = ghcb->save.sw_exit_info_1;
1782		__entry->info2       = ghcb->save.sw_exit_info_2;
1783	),
1784
1785	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
1786		  __entry->vcpu_id, __entry->exit_reason,
1787		  __entry->info1, __entry->info2)
1788);
1789
1790/*
1791 * Tracepoint for the start of VMGEXIT MSR protocol processing
1792 */
1793TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
1794	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
1795	TP_ARGS(vcpu_id, ghcb_gpa),
1796
1797	TP_STRUCT__entry(
1798		__field(unsigned int, vcpu_id)
1799		__field(u64, ghcb_gpa)
1800	),
1801
1802	TP_fast_assign(
1803		__entry->vcpu_id  = vcpu_id;
1804		__entry->ghcb_gpa = ghcb_gpa;
1805	),
1806
1807	TP_printk("vcpu %u, ghcb_gpa %016llx",
1808		  __entry->vcpu_id, __entry->ghcb_gpa)
1809);
1810
1811/*
1812 * Tracepoint for the end of VMGEXIT MSR protocol processing
1813 */
1814TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
1815	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
1816	TP_ARGS(vcpu_id, ghcb_gpa, result),
1817
1818	TP_STRUCT__entry(
1819		__field(unsigned int, vcpu_id)
1820		__field(u64, ghcb_gpa)
1821		__field(int, result)
1822	),
1823
1824	TP_fast_assign(
1825		__entry->vcpu_id  = vcpu_id;
1826		__entry->ghcb_gpa = ghcb_gpa;
1827		__entry->result   = result;
1828	),
1829
1830	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
1831		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
1832);
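/*
 * Editor's note (hedged): the two vmgexit event pairs above mirror the
 * two SEV-ES guest/host communication paths -- kvm_vmgexit_enter/exit
 * snapshot the exit code and info fields from a full GHCB, while the
 * *_msr_protocol pair covers the simpler GHCB MSR protocol, where only
 * a GHCB GPA value (and a result code on exit) is exchanged.
 */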
1833
1834#endif /* _TRACE_KVM_H */
1835
1836#undef TRACE_INCLUDE_PATH
1837#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1838#undef TRACE_INCLUDE_FILE
1839#define TRACE_INCLUDE_FILE trace
1840
1841/* This part must be outside protection */
1842#include <trace/define_trace.h>