/* SPDX-License-Identifier: GPL-2.0 */

#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_PR_H

#include <linux/tracepoint.h>
#include "trace_book3s.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
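
/*
 * Tracepoints for the Book3S PR (problem-state) flavour of KVM on PowerPC.
 * TRACE_SYSTEM groups them under the "kvm_pr" event system, so on a kernel
 * built with tracing support they can typically be controlled at run time
 * through tracefs, e.g. (illustrative commands; the mount point may differ):
 *
 *	echo 1 > /sys/kernel/tracing/events/kvm_pr/kvm_exit/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */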

TRACE_EVENT(kvm_book3s_reenter,
	TP_PROTO(int r, struct kvm_vcpu *vcpu),
	TP_ARGS(r, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	r		)
		__field(	unsigned long,	pc		)
	),

	TP_fast_assign(
		__entry->r		= r;
		__entry->pc		= kvmppc_get_pc(vcpu);
	),

	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);

#ifdef CONFIG_PPC_BOOK3S_64

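/*
 * Records a host hash-PTE mapping set up for a guest translation on
 * 64-bit Book3S hosts.  flag_w is '-' when (rflags & HPTE_R_PP) == 3
 * (read-only) and 'w' otherwise; flag_x is '-' when HPTE_R_N (no-execute)
 * is set and 'x' otherwise.
 */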
TRACE_EVENT(kvm_book3s_64_mmu_map,
	TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
		 struct kvmppc_pte *orig_pte),
	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

	TP_STRUCT__entry(
		__field(	unsigned char,		flag_w		)
		__field(	unsigned char,		flag_x		)
		__field(	unsigned long,		eaddr		)
		__field(	unsigned long,		hpteg		)
		__field(	unsigned long,		va		)
		__field(	unsigned long long,	vpage		)
		__field(	unsigned long,		hpaddr		)
	),

	TP_fast_assign(
		__entry->flag_w	= ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
		__entry->flag_x	= (rflags & HPTE_R_N) ? '-' : 'x';
		__entry->eaddr	= orig_pte->eaddr;
		__entry->hpteg	= hpteg;
		__entry->va	= va;
		__entry->vpage	= orig_pte->vpage;
		__entry->hpaddr	= hpaddr;
	),

	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */

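/*
 * kvm_book3s_mmu_map / kvm_book3s_mmu_invalidate record a struct hpte_cache
 * entry being added to or dropped from the shadow MMU.  The flags field is
 * an rwx bitmask: 0x4 = may_read, 0x2 = may_write, 0x1 = may_execute.
 */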
TRACE_EVENT(kvm_book3s_mmu_map,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field(	u64,		host_vpn	)
		__field(	u64,		pfn		)
		__field(	ulong,		eaddr		)
		__field(	u64,		vpage		)
		__field(	ulong,		raddr		)
		__field(	int,		flags		)
	),

	TP_fast_assign(
		__entry->host_vpn	= pte->host_vpn;
		__entry->pfn		= pte->pfn;
		__entry->eaddr		= pte->pte.eaddr;
		__entry->vpage		= pte->pte.vpage;
		__entry->raddr		= pte->pte.raddr;
		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
					  (pte->pte.may_write ? 0x2 : 0) |
					  (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);

TRACE_EVENT(kvm_book3s_mmu_invalidate,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field(	u64,		host_vpn	)
		__field(	u64,		pfn		)
		__field(	ulong,		eaddr		)
		__field(	u64,		vpage		)
		__field(	ulong,		raddr		)
		__field(	int,		flags		)
	),

	TP_fast_assign(
		__entry->host_vpn	= pte->host_vpn;
		__entry->pfn		= pte->pfn;
		__entry->eaddr		= pte->pte.eaddr;
		__entry->vpage		= pte->pte.vpage;
		__entry->raddr		= pte->pte.raddr;
		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
					  (pte->pte.may_write ? 0x2 : 0) |
					  (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);

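/*
 * Bulk shadow-PTE flush.  Note that 'type' is recorded as a bare
 * const char * and is only dereferenced when the ring buffer is read, so
 * it has to point at storage that stays valid (in practice a string
 * literal used as a prefix in the "Flush %d %sPTEs" message).
 */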
TRACE_EVENT(kvm_book3s_mmu_flush,
	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
		 unsigned long long p2),
	TP_ARGS(type, vcpu, p1, p2),

	TP_STRUCT__entry(
		__field(	int,			count		)
		__field(	unsigned long long,	p1		)
		__field(	unsigned long long,	p2		)
		__field(	const char *,		type		)
	),

	TP_fast_assign(
		__entry->count		= to_book3s(vcpu)->hpte_cache_count;
		__entry->p1		= p1;
		__entry->p2		= p2;
		__entry->type		= type;
	),

	TP_printk("Flush %d %sPTEs: %llx - %llx",
		  __entry->count, __entry->type, __entry->p1, __entry->p2)
);

TRACE_EVENT(kvm_book3s_slb_found,
	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
	TP_ARGS(gvsid, hvsid),

	TP_STRUCT__entry(
		__field(	unsigned long long,	gvsid		)
		__field(	unsigned long long,	hvsid		)
	),

	TP_fast_assign(
		__entry->gvsid		= gvsid;
		__entry->hvsid		= hvsid;
	),

	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);

TRACE_EVENT(kvm_book3s_slb_fail,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
	TP_ARGS(sid_map_mask, gvsid),

	TP_STRUCT__entry(
		__field(	unsigned short,		sid_map_mask	)
		__field(	unsigned long long,	gvsid		)
	),

	TP_fast_assign(
		__entry->sid_map_mask	= sid_map_mask;
		__entry->gvsid		= gvsid;
	),

	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);

TRACE_EVENT(kvm_book3s_slb_map,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
		 unsigned long long hvsid),
	TP_ARGS(sid_map_mask, gvsid, hvsid),

	TP_STRUCT__entry(
		__field(	unsigned short,		sid_map_mask	)
		__field(	unsigned long long,	guest_vsid	)
		__field(	unsigned long long,	host_vsid	)
	),

	TP_fast_assign(
		__entry->sid_map_mask	= sid_map_mask;
		__entry->guest_vsid	= gvsid;
		__entry->host_vsid	= hvsid;
	),

	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
		  __entry->guest_vsid, __entry->host_vsid)
);

TRACE_EVENT(kvm_book3s_slbmte,
	TP_PROTO(u64 slb_vsid, u64 slb_esid),
	TP_ARGS(slb_vsid, slb_esid),

	TP_STRUCT__entry(
		__field(	u64,	slb_vsid	)
		__field(	u64,	slb_esid	)
	),

	TP_fast_assign(
		__entry->slb_vsid	= slb_vsid;
		__entry->slb_esid	= slb_esid;
	),

	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);

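/*
 * Guest exit back into the host.  exit_nr is the interrupt vector that
 * caused the exit and is pretty-printed via __print_symbolic() using the
 * kvm_trace_symbol_exit table (pulled in above through trace_book3s.h),
 * e.g. 0x300 is reported as "DATA_STORAGE".
 */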
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
	TP_ARGS(exit_nr, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_nr		)
		__field(	unsigned long,	pc		)
		__field(	unsigned long,	msr		)
		__field(	unsigned long,	dar		)
		__field(	unsigned long,	srr1		)
		__field(	unsigned long,	last_inst	)
	),

	TP_fast_assign(
		__entry->exit_nr	= exit_nr;
		__entry->pc		= kvmppc_get_pc(vcpu);
		__entry->dar		= kvmppc_get_fault_dar(vcpu);
		__entry->msr		= kvmppc_get_msr(vcpu);
		__entry->srr1		= vcpu->arch.shadow_srr1;
		__entry->last_inst	= vcpu->arch.last_inst;
	),

	TP_printk("exit=%s"
		" | pc=0x%lx"
		" | msr=0x%lx"
		" | dar=0x%lx"
		" | srr1=0x%lx"
		" | last_inst=0x%lx"
		,
		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
		__entry->pc,
		__entry->msr,
		__entry->dar,
		__entry->srr1,
		__entry->last_inst
		)
);

#endif /* _TRACE_KVM_PR_H */

/* This part must be outside protection */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE

#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr

#include <trace/define_trace.h>
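
/*
 * For reference: each TRACE_EVENT(name, ...) above is expanded by
 * <trace/define_trace.h> into a trace_<name>() helper with the TP_PROTO
 * signature.  An illustrative (not verbatim) caller in the PR KVM exit
 * path would look like:
 *
 *	trace_kvm_exit(exit_nr, vcpu);
 *	trace_kvm_book3s_reenter(r, vcpu);
 *
 * Such calls compile down to a static-branch-guarded no-op unless the
 * corresponding event under events/kvm_pr/ has been enabled.
 */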