/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>

struct io_wq_work;

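/*
 * All events below live under the "io_uring" trace system. An illustrative
 * way to watch them at runtime (assuming tracefs is mounted at
 * /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/io_uring/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */
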
/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd: corresponding file descriptor
 * @ctx: pointer to a ring context structure
 * @sq_entries: actual SQ size
 * @cq_entries: actual CQ size
 * @flags: SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing io_uring creation and provides a pointer to the context,
 * which can later be used to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry(
		__field( int, fd )
		__field( void *, ctx )
		__field( u32, sq_entries )
		__field( u32, cq_entries )
		__field( u32, flags )
	),

	TP_fast_assign(
		__entry->fd = fd;
		__entry->ctx = ctx;
		__entry->sq_entries = sq_entries;
		__entry->cq_entries = cq_entries;
		__entry->flags = flags;
	),

	TP_printk("ring %p, fd %d, sq size %u, cq size %u, flags %u",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);
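
/*
 * Illustrative call site (not part of this header): the ring setup path in
 * fs/io_uring.c fires this event once the ring fd has been installed,
 * along the lines of:
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries,
 *			      p->flags);
 */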

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx: pointer to a ring context structure
 * @opcode: describes which operation to perform
 * @nr_files: number of registered files
 * @nr_bufs: number of registered buffers
 * @eventfd: whether an eventfd is registered or not
 * @ret: return code
 *
 * Allows tracing fixed files/buffers/eventfds that can be registered to
 * avoid the overhead of getting references to them for every operation.
 * This event, together with io_uring_file_get, can provide a full picture
 * of how much overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, bool eventfd, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( unsigned, opcode )
		__field( unsigned, nr_files )
		__field( unsigned, nr_bufs )
		__field( bool, eventfd )
		__field( long, ret )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->opcode = opcode;
		__entry->nr_files = nr_files;
		__entry->nr_bufs = nr_bufs;
		__entry->eventfd = eventfd;
		__entry->ret = ret;
	),

	TP_printk("ring %p, opcode %u, nr_user_files %u, nr_user_bufs %u, "
		  "eventfd %d, ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->eventfd, __entry->ret)
);
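
/*
 * Illustrative call site (not part of this header): the registration
 * syscall path would fire this after the register operation finishes,
 * roughly:
 *
 *	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
 *				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL,
 *				ret);
 */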

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx: pointer to a ring context structure
 * @fd: SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help to figure out whether it makes sense to use fixed files, or to
 * check that fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, int fd),

	TP_ARGS(ctx, fd),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( int, fd )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->fd = fd;
	),

	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);
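
/*
 * With the event enabled, each file reference shows up in the trace buffer
 * formatted per the TP_printk() above, e.g. (abridged, values illustrative):
 *
 *	io_uring_file_get: ring 00000000f1e2d3c4, fd 5
 */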

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx: pointer to a ring context structure
 * @rw: type of workqueue, hashed or normal
 * @req: pointer to a submitted request
 * @work: pointer to a submitted io_wq_work
 * @flags: request flags
 *
 * Allows tracing asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
		 unsigned int flags),

	TP_ARGS(ctx, rw, req, work, flags),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( int, rw )
		__field( void *, req )
		__field( struct io_wq_work *, work )
		__field( unsigned int, flags )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->rw = rw;
		__entry->req = req;
		__entry->work = work;
		__entry->flags = flags;
	),

	TP_printk("ring %p, request %p, flags %u, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->flags,
		  __entry->rw ? "hashed" : "normal", __entry->work)
);
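
/*
 * Illustrative call site (not part of this header): when a request is
 * punted to the async workqueue, the caller passes whether the work is
 * hashed, e.g.:
 *
 *	trace_io_uring_queue_async_work(ctx, hashed, req, &req->work,
 *					req->flags);
 *
 * where "hashed" is a local boolean computed by the submission path.
 */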

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a deferred request
 * @user_data: user data associated with the request
 *
 * Allows tracking of deferred requests, to get an insight into which
 * requests are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data),

	TP_ARGS(ctx, req, user_data),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, data )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->data = user_data;
	),

	TP_printk("ring %p, request %p, user_data %llu", __entry->ctx,
		  __entry->req, __entry->data)
);

/**
 * io_uring_link - called before the io_uring request is added to the
 * link_list of another request
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a linked request
 * @target_req: pointer to the previous request, which will contain @req
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( void *, req )
		__field( void *, target_req )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->target_req = target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);
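
/*
 * Note (illustrative): chains are built when userspace submits consecutive
 * SQEs with IOSQE_IO_LINK set in sqe->flags, so this event typically fires
 * once for every SQE appended after the head of a chain.
 */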

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx: pointer to a ring context structure
 * @min_events: minimal number of events to wait for
 *
 * Allows tracking of waiting for a CQE, so that we can e.g. troubleshoot
 * situations where an application wants to wait for an event that never
 * comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( int, min_events )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->min_events = min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
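
/*
 * Note (illustrative): userspace reaches this path via io_uring_enter(2)
 * with IORING_ENTER_GETEVENTS set, where min_events corresponds to the
 * min_complete argument of the syscall.
 */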

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req: request whose links were cancelled
 * @link: cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *req, void *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry(
		__field( void *, req )
		__field( void *, link )
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->link = link;
	),

	TP_printk("request %p, link %p", __entry->req, __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx: pointer to a ring context structure
 * @user_data: user data associated with the request
 * @res: result of the request
 * @cflags: completion flags
 *
 * Allows tracking of request completion, which can be correlated with the
 * corresponding submission via @user_data.
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),

	TP_ARGS(ctx, user_data, res, cflags),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( u64, user_data )
		__field( int, res )
		__field( unsigned, cflags )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->user_data = user_data;
		__entry->res = res;
		__entry->cflags = cflags;
	),

	TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
		  __entry->ctx, (unsigned long long)__entry->user_data,
		  __entry->res, __entry->cflags)
);
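
/*
 * With the event enabled, completions are logged per the TP_printk() above,
 * e.g. (abridged, values illustrative):
 *
 *	io_uring_complete: ring 00000000f1e2d3c4, user_data 0x2a, result 512, cflags 0
 */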

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a submitted request
 * @opcode: opcode of request
 * @user_data: user data associated with the request
 * @flags: request flags
 * @force_nonblock: whether the submission was forced to be non-blocking
 * @sq_thread: true if the SQ thread has submitted this SQE
 *
 * Allows tracking of SQE submission, to understand its source: the SQ
 * thread or an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
		 bool force_nonblock, bool sq_thread),

	TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( void *, req )
		__field( u8, opcode )
		__field( u64, user_data )
		__field( u32, flags )
		__field( bool, force_nonblock )
		__field( bool, sq_thread )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->opcode = opcode;
		__entry->user_data = user_data;
		__entry->flags = flags;
		__entry->force_nonblock = force_nonblock;
		__entry->sq_thread = sq_thread;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->opcode, (unsigned long long)__entry->user_data,
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);
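
/*
 * Submissions and completions can be correlated through user_data. With
 * both events enabled, a single read might appear as (abridged, values
 * illustrative):
 *
 *	io_uring_submit_sqe: ring ..., req ..., op 22, data 0x2a, flags 0, non block 1, sq_thread 0
 *	io_uring_complete:   ring ..., user_data 0x2a, result 512, cflags 0
 */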

/**
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to the armed request
 * @opcode: opcode of request
 * @user_data: user data associated with the request
 * @mask: request poll events mask
 * @events: registered events of interest
 *
 * Allows tracking of which fds are being waited on and which events are of
 * interest.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
		 int mask, int events),

	TP_ARGS(ctx, req, opcode, user_data, mask, events),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( void *, req )
		__field( u8, opcode )
		__field( u64, user_data )
		__field( int, mask )
		__field( int, events )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->opcode = opcode;
		__entry->user_data = user_data;
		__entry->mask = mask;
		__entry->events = events;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask, __entry->events)
);

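/**
 * io_uring_poll_wake - called on a wakeup for a previously armed poll request
 *
 * @ctx: pointer to a ring context structure
 * @opcode: opcode of request
 * @user_data: user data associated with the request
 * @mask: events that triggered the wakeup
 *
 * Allows tracking of poll wakeups, complementing io_uring_poll_arm.
 */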
TRACE_EVENT(io_uring_poll_wake,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( u8, opcode )
		__field( u64, user_data )
		__field( int, mask )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->opcode = opcode;
		__entry->user_data = user_data;
		__entry->mask = mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask)
);

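/**
 * io_uring_task_add - called before adding task_work for a request
 *
 * @ctx: pointer to a ring context structure
 * @opcode: opcode of request
 * @user_data: user data associated with the request
 * @mask: events that triggered the task_work
 *
 * Allows tracking of completion work deferred to the submitting task.
 */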
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( u8, opcode )
		__field( u64, user_data )
		__field( int, mask )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->opcode = opcode;
		__entry->user_data = user_data;
		__entry->mask = mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask)
);

/**
 * io_uring_task_run - called when task_work_run() executes the poll events
 * notification callbacks
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to the armed request
 * @opcode: opcode of request
 * @user_data: user data associated with the request
 *
 * Allows tracking of when notified poll events are processed.
 */
TRACE_EVENT(io_uring_task_run,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),

	TP_ARGS(ctx, req, opcode, user_data),

	TP_STRUCT__entry(
		__field( void *, ctx )
		__field( void *, req )
		__field( u8, opcode )
		__field( u64, user_data )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->opcode = opcode;
		__entry->user_data = user_data;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx",
		  __entry->ctx, __entry->req, __entry->opcode,
		  (unsigned long long) __entry->user_data)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>