/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>

struct io_wq_work;

/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:			corresponding file descriptor
 * @ctx:		pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:		SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation and provides a pointer to the context
 * that can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d, sq size %d, cq size %d, flags %d",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);
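
/*
 * Example (illustrative, not part of this header): the event can be
 * enabled from userspace via tracefs before a ring is created:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/io_uring/io_uring_create/enable
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Any subsequent io_uring_setup(2) call then logs the new ring's context
 * pointer, fd and actual SQ/CQ sizes.
 */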

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx:		pointer to a ring context structure
 * @opcode:		describes which operation to perform
 * @nr_files:		number of registered files
 * @nr_bufs:		number of registered buffers
 * @eventfd:		whether an eventfd was registered or not
 * @ret:		return code
 *
 * Allows tracing of fixed files/buffers/eventfds, which can be registered to
 * avoid the overhead of taking references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead can be reduced by using fixed resources.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, bool eventfd, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  unsigned,	opcode		)
		__field(  unsigned,	nr_files	)
		__field(  unsigned,	nr_bufs		)
		__field(  bool,		eventfd		)
		__field(  long,		ret		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->eventfd	= eventfd;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
		  "eventfd %d, ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->eventfd, __entry->ret)
);
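
/*
 * Example (illustrative): a userspace call that would fire this event,
 * assuming ring_fd came from io_uring_setup(2) and iov is a prepared
 * struct iovec array (signature as in io_uring_register(2)):
 *
 *	ret = io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, iov, nr_iov);
 */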

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx:	pointer to a ring context structure
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help figure out whether it makes sense to use fixed files, or verify
 * that fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, int fd),

	TP_ARGS(ctx, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  int,		fd	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fd	= fd;
	),

	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);
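
/*
 * Example (illustrative): a high rate of io_uring_file_get events for the
 * same descriptor suggests registering it as a fixed file once:
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, nr_fds);
 *
 * and then submitting SQEs with sqe->flags |= IOSQE_FIXED_FILE and sqe->fd
 * set to an index into the registered file table.
 */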

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx:	pointer to a ring context structure
 * @rw:		type of workqueue, hashed or normal
 * @req:	pointer to a submitted request
 * @work:	pointer to a submitted io_wq_work
 * @flags:	request flags
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
		 unsigned int flags),

	TP_ARGS(ctx, rw, req, work, flags),

	TP_STRUCT__entry (
		__field(  void *,			ctx	)
		__field(  int,				rw	)
		__field(  void *,			req	)
		__field(  struct io_wq_work *,		work	)
		__field(  unsigned int,			flags	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->rw	= rw;
		__entry->req	= req;
		__entry->work	= work;
		__entry->flags	= flags;
	),

	TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->flags,
		  __entry->rw ? "hashed" : "normal", __entry->work)
);
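
/*
 * Example (illustrative): tracefs filters can narrow this event down, e.g.
 * to hashed (serialized) work only:
 *
 *	# echo 'rw != 0' > /sys/kernel/debug/tracing/events/io_uring/io_uring_queue_async_work/filter
 */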

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a deferred request
 * @user_data:	user data associated with the request
 *
 * Allows tracking of deferred requests, to get insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data),

	TP_ARGS(ctx, req, user_data),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  void *,		req	)
		__field(  unsigned long long,	data	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->req	= req;
		__entry->data	= user_data;
	),

	TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
		  __entry->req, __entry->data)
);
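
/*
 * Example (illustrative): one userspace pattern that leads to deferral is
 * the drain flag, which holds a request back until all previously
 * submitted requests have completed:
 *
 *	sqe->flags |= IOSQE_IO_DRAIN;
 */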

/**
 * io_uring_link - called before the io_uring request is added into the
 *		   link_list of another request
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a linked request
 * @target_req:		pointer to the previous request that @req is linked after
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how linking influences their execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);
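
/*
 * Example (illustrative): setting IOSQE_IO_LINK on an SQE makes the next
 * SQE in the ring depend on it; the follow-up request is then added to
 * the link_list of the first one, at which point this event fires:
 *
 *	sqe->flags |= IOSQE_IO_LINK;
 */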

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:		pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking of waits for CQEs, e.g. to troubleshoot situations where
 * an application waits for an event that never arrives.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
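
/*
 * Example (illustrative): this event precedes a blocking wait for
 * completions, e.g. one entered via the io_uring_enter(2) syscall:
 *
 *	io_uring_enter(ring_fd, 0, min_complete, IORING_ENTER_GETEVENTS, NULL);
 */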

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking of linked request cancellation, showing not only that
 * some work was cancelled, but also which request caused it.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *req, void *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(  void *,	req	)
		__field(  void *,	link	)
	),

	TP_fast_assign(
		__entry->req	= req;
		__entry->link	= link;
	),

	TP_printk("request %p, link %p", __entry->req, __entry->link)
);
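
/*
 * Note (illustrative): when a request in an IOSQE_IO_LINK chain fails,
 * its dependent links are completed with -ECANCELED, and this event fires
 * once per cancelled link.
 */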

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:		pointer to a ring context structure
 * @user_data:		user data associated with the request
 * @res:		result of the request
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, u64 user_data, long res),

	TP_ARGS(ctx, user_data, res),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u64,		user_data	)
		__field(  long,		res		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
	),

	TP_printk("ring %p, user_data 0x%llx, result %ld",
		  __entry->ctx, (unsigned long long)__entry->user_data,
		  __entry->res)
);
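
/*
 * Example (illustrative, hypothetical values): given the TP_printk format
 * above, a completion appears in trace_pipe roughly as:
 *
 *	io_uring_complete: ring 00000000a8f3c2d1, user_data 0x2a, result 13
 */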

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx:		pointer to a ring context structure
 * @opcode:		opcode of the request
 * @user_data:		user data associated with the request
 * @force_nonblock:	whether the submission context must not block
 * @sq_thread:		true if the SQ thread has submitted this SQE
 *
 * Allows tracking of SQE submission, to understand its source: the SQ
 * thread or an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
		 bool sq_thread),

	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  bool,		force_nonblock	)
		__field(  bool,		sq_thread	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= sq_thread;
	),

	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->force_nonblock, __entry->sq_thread)
);
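
/*
 * Example (illustrative): sq_thread is true for rings that use the kernel
 * submission thread, i.e. rings created with:
 *
 *	params.flags |= IORING_SETUP_SQPOLL;
 *	ring_fd = io_uring_setup(entries, &params);
 */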
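
/**
 * io_uring_poll_arm - called after arming a poll wait for a request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of the request
 * @user_data:	user data associated with the request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 */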
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),

	TP_ARGS(ctx, opcode, user_data, mask, events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
		__field(  int,		events		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
		__entry->events		= events;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask, __entry->events)
);
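
/**
 * io_uring_poll_wake - called when a previously armed poll wait is woken up
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of the request
 * @user_data:	user data associated with the request
 * @mask:	triggered events mask
 */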
TRACE_EVENT(io_uring_poll_wake,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask)
);
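
/**
 * io_uring_task_add - called after queueing task_work for a request
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of the request
 * @user_data:	user data associated with the request
 * @mask:	triggered events mask
 */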
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask)
);
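
/**
 * io_uring_task_run - called when a previously queued task_work is run
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of the request
 * @user_data:	user data associated with the request
 */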
TRACE_EVENT(io_uring_task_run,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data),

	TP_ARGS(ctx, opcode, user_data),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
	),

	TP_printk("ring %p, op %d, data 0x%llx",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>