v3.1
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

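/*
 * ERSN() expands to a { value, "name" } pair, so kvm_trace_exit_reason
 * can be handed to __print_symbolic() when formatting the trace output.
 */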
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)

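/*
 * Logged when a vcpu exits KVM_RUN back to userspace: "reason" is the
 * KVM_EXIT_* code; a negative errno is printed instead, with -EINTR
 * reported as "restart".
 */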
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
 
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %u vec %x (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12), (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk("irqchip %s pin %u",
		  __print_symbolic(__entry->irqchip, kvm_irqchips),
		 __entry->pin)
);



#endif /* defined(__KVM_HAVE_IOAPIC) */

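/*
 * Access types recorded by kvm_mmio below; an "unsatisfied-read" is an
 * MMIO read that could not be completed in the kernel and typically has
 * to be satisfied by userspace first.
 */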
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(hva, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->hva		= hva;
		__entry->gfn		=
		  slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx %s",
		  __entry->hva, __entry->gfn,
		  __entry->referenced ? "YOUNG" : "OLD")
);

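/*
 * Asynchronous page fault tracepoints: when a guest page is not
 * immediately available, KVM can fault it in from a workqueue while the
 * vcpu keeps running; these events track a request from queueing to
 * completion.
 */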
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, struct page *page, u64 gva),
	TP_ARGS(address, page, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(pfn_t, pfn)
		__field(u64, gva)
		),

	TP_fast_assign(
		__entry->address = address;
		__entry->pfn = page ? page_to_pfn(page) : 0;
		__entry->gva = gva;
		),

	TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
		  __entry->address, __entry->pfn)
);

#endif

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
v4.6
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)

TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

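/*
 * Emitted when a halted vcpu becomes runnable again: "poll" means the
 * wakeup arrived while busy-polling, "wait" means the vcpu had actually
 * blocked; ns is the time spent in that state.
 */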
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited),
	    TP_ARGS(ns, waited),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
	),

	TP_printk("%s time %lld ns",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns)
);

#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
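/*
 * APIC delivery-mode values (bits 10:8 of an IOAPIC redirection entry or
 * MSI data word), used by the decoders below.
 */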
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e		= e;
	),

	TP_printk("dst %x vec=%u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

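/*
 * MSI layout as decoded below: destination ID in address bits 19:12,
 * vector in data bits 7:0, plus destination mode (address bit 2),
 * redirection hint (address bit 3) and trigger mode (data bit 15).
 */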
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %u vec %x (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12), (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)

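/*
 * kvm_ack_irq is also built when no IOAPIC support is compiled in; without
 * the kvm_irqchips table the irqchip is printed as a raw number.
 */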
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */


#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	level		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->gfn		= gfn;
		__entry->level		= level;
		__entry->hva		= ((gfn - slot->base_gfn) <<
					    PAGE_SHIFT) + slot->userspace_addr;
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx level %u %s",
		  __entry->hva, __entry->gfn, __entry->level,
		  __entry->referenced ? "YOUNG" : "OLD")
);

#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
		),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
		),

	TP_printk("gva %#llx address %#lx",  __entry->gva,
		  __entry->address)
);

#endif

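/*
 * Tracks the adaptive halt-polling window: the per-vcpu poll time is grown
 * or shrunk depending on how recent halts went, and the wrappers below
 * record which direction was taken.
 */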
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
			__entry->vcpu_id,
			__entry->new,
			__entry->grow ? "grow" : "shrink",
			__entry->old)
);

#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>