1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright © 2024 Intel Corporation
4 */
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM xe
8
9#if !defined(_XE_TRACE_BO_H_) || defined(TRACE_HEADER_MULTI_READ)
10#define _XE_TRACE_BO_H_
11
12#include <linux/tracepoint.h>
13#include <linux/types.h>
14
15#include "xe_bo.h"
16#include "xe_bo_types.h"
17#include "xe_vm.h"
18
/* Helpers to resolve the tracing device name from a BO, a VM or a VMA. */
#define __dev_name_bo(bo) dev_name(xe_bo_device(bo)->drm.dev)
#define __dev_name_vm(vm) dev_name((vm)->xe->drm.dev)
#define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma))
22
/*
 * Event class for buffer-object (BO) tracepoints: records the owning
 * device name plus the BO's size, flags and associated VM pointer.
 */
DECLARE_EVENT_CLASS(xe_bo,
		    TP_PROTO(struct xe_bo *bo),
		    TP_ARGS(bo),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_bo(bo))
			     __field(size_t, size)
			     __field(u32, flags)
			     __field(struct xe_vm *, vm)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
			   __entry->vm = bo->vm;
			   ),

		    TP_printk("dev=%s, size=%zu, flags=0x%02x, vm=%p",
			      __get_str(dev), __entry->size,
			      __entry->flags, __entry->vm)
);
45
/*
 * BO trace event using the xe_bo class; per its name it fires on the BO
 * CPU fault path (the trigger call site lives outside this header).
 */
DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);
50
51TRACE_EVENT(xe_bo_move,
52 TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
53 bool move_lacks_source),
54 TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
55 TP_STRUCT__entry(
56 __field(struct xe_bo *, bo)
57 __field(size_t, size)
58 __string(new_placement_name, xe_mem_type_to_name[new_placement])
59 __string(old_placement_name, xe_mem_type_to_name[old_placement])
60 __string(device_id, __dev_name_bo(bo))
61 __field(bool, move_lacks_source)
62 ),
63
64 TP_fast_assign(
65 __entry->bo = bo;
66 __entry->size = bo->size;
67 __assign_str(new_placement_name);
68 __assign_str(old_placement_name);
69 __assign_str(device_id);
70 __entry->move_lacks_source = move_lacks_source;
71 ),
72 TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
73 __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
74 __get_str(old_placement_name),
75 __get_str(new_placement_name), __get_str(device_id))
76);
77
78DECLARE_EVENT_CLASS(xe_vma,
79 TP_PROTO(struct xe_vma *vma),
80 TP_ARGS(vma),
81
82 TP_STRUCT__entry(
83 __string(dev, __dev_name_vma(vma))
84 __field(struct xe_vma *, vma)
85 __field(u32, asid)
86 __field(u64, start)
87 __field(u64, end)
88 __field(u64, ptr)
89 ),
90
91 TP_fast_assign(
92 __assign_str(dev);
93 __entry->vma = vma;
94 __entry->asid = xe_vma_vm(vma)->usm.asid;
95 __entry->start = xe_vma_start(vma);
96 __entry->end = xe_vma_end(vma) - 1;
97 __entry->ptr = xe_vma_userptr(vma);
98 ),
99
100 TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
101 __get_str(dev), __entry->vma, __entry->asid, __entry->start,
102 __entry->end, __entry->ptr)
103)
104
/*
 * Per-VMA trace events, all sharing the xe_vma event class layout above.
 * Each event name identifies its emission point (bind/unbind, page
 * fault, userptr invalidation, rebind paths, eviction); the actual
 * trigger call sites live in the corresponding .c files, not here.
 */
DEFINE_EVENT(xe_vma, xe_vma_flush,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);
174
/*
 * Event class for VM tracepoints: records the device name, the VM
 * pointer and the VM's USM address-space ID.
 */
DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_vm(vm))
			     __field(struct xe_vm *, vm)
			     __field(u32, asid)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->vm = vm;
			   __entry->asid = vm->usm.asid;
			   ),

		    TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
			      __entry->vm, __entry->asid)
);
194
/*
 * Per-VM trace events, all sharing the xe_vm event class layout above.
 * Each event name identifies its emission point (create/free, kill,
 * CPU bind, restart, rebind-worker lifecycle, ops failure); the actual
 * trigger call sites live in the corresponding .c files, not here.
 */
DEFINE_EVENT(xe_vm, xe_vm_kill,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);
239
240#endif
241
242/* This part must be outside protection */
243#undef TRACE_INCLUDE_PATH
244#undef TRACE_INCLUDE_FILE
245#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
246#define TRACE_INCLUDE_FILE xe_trace_bo
247#include <trace/define_trace.h>