/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/io_uring.h>

struct io_wq_work;

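/*
 * Each TRACE_EVENT() below becomes a tracepoint named io_uring:<event>.
 * As a usage sketch (assuming the standard tracefs layout), an event can
 * be enabled and watched at runtime with:
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_create/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * or captured system-wide with perf:
 *
 *	perf record -e 'io_uring:*' -a -- sleep 1
 */
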
/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation, and provides a pointer to the
 * context that can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(int,		fd)
		__field(void *,		ctx)
		__field(u32,		sq_entries)
		__field(u32,		cq_entries)
		__field(u32,		flags)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);

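/*
 * Call-site sketch (the hook itself lives in io_uring's setup path, not in
 * this header): the macro above generates trace_io_uring_create(), which
 * can be invoked once the ring has been sized and an fd allocated, roughly:
 *
 *	trace_io_uring_create(fd, ctx, sq_entries, cq_entries, flags);
 */
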
/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @ret:	return code
 *
 * Allows tracing of fixed files and buffers, which can be registered to
 * avoid the overhead of taking references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field(void *,		ctx)
		__field(unsigned,	opcode)
		__field(unsigned,	nr_files)
		__field(unsigned,	nr_bufs)
		__field(long,		ret)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
		  "ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->ret)
);

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @req:	pointer to a submitted request
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help in figuring out whether it makes sense to use fixed files, or in
 * checking that fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(struct io_kiocb *req, int fd),

	TP_ARGS(req, fd),

	TP_STRUCT__entry (
		__field(void *,	ctx)
		__field(void *,	req)
		__field(u64,	user_data)
		__field(int,	fd)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->fd		= fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @req:	pointer to a submitted request
 * @rw:		type of workqueue, hashed or normal
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(struct io_kiocb *req, int rw),

	TP_ARGS(req, rw),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(u64,			user_data)
		__field(u8,			opcode)
		__field(unsigned int,		flags)
		__field(struct io_wq_work *,	work)
		__field(int,			rw)

		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->flags		= req->flags;
		__entry->opcode		= req->opcode;
		__entry->work		= &req->work;
		__entry->rw		= rw;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);

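/*
 * Note on op_str above: __string()/__assign_str() snapshot the string
 * returned by io_uring_get_opcode() into the trace record as the event
 * fires, so TP_printk() can later print a human-readable opcode name
 * without touching the (possibly long-gone) request.
 */
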
/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @req:	pointer to a deferred request
 *
 * Allows tracking of deferred requests, to get an insight into which
 * requests are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	data)
		__field(u8,			opcode)

		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx	= req->ctx;
		__entry->req	= req;
		__entry->data	= req->cqe.user_data;
		__entry->opcode	= req->opcode;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
		  __entry->ctx, __entry->req, __entry->data,
		  __get_str(op_str))
);

/**
 * io_uring_link - called before an io_uring request is added to the
 *		   link_list of another request
 *
 * @req:	pointer to a linked request
 * @target_req:	pointer to a previous request, that would contain @req
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),

	TP_ARGS(req, target_req),

	TP_STRUCT__entry (
		__field(void *,	ctx)
		__field(void *,	req)
		__field(void *,	target_req)
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking of waiting for a CQE, so that we can e.g. troubleshoot
 * situations where an application wants to wait for an event that never
 * comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(void *,	ctx)
		__field(int,	min_events)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that
 * some work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(void *,			link)

		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->link		= link;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str), __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a submitted request
 * @user_data:	user data associated with the request
 * @res:	result of the request
 * @cflags:	completion flags
 * @extra1:	extra 64-bit data for CQE32
 * @extra2:	extra 64-bit data for CQE32
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
		 u64 extra1, u64 extra2),

	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),

	TP_STRUCT__entry (
		__field(void *,		ctx)
		__field(void *,		req)
		__field(u64,		user_data)
		__field(int,		res)
		__field(unsigned,	cflags)
		__field(u64,		extra1)
		__field(u64,		extra2)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->extra1		= extra1;
		__entry->extra2		= extra2;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
		  "extra1 %llu extra2 %llu",
		  __entry->ctx, __entry->req,
		  __entry->user_data,
		  __entry->res, __entry->cflags,
		  (unsigned long long) __entry->extra1,
		  (unsigned long long) __entry->extra2)
);

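/*
 * Illustrative output (values made up, pointer rendering depends on %p
 * hashing) for the TP_printk() format above, as seen in trace_pipe:
 *
 *	ring 00000000a1b2c3d4, req 00000000e5f60718, user_data 0x1,
 *	result 4096, cflags 0x0 extra1 0 extra2 0
 */
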
/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @req:		pointer to a submitted request
 * @force_nonblock:	whether the submission must not block
 *
 * Allows tracking of SQE submission, to understand what its source was:
 * the SQ thread or an io_uring_enter() call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(struct io_kiocb *req, bool force_nonblock),

	TP_ARGS(req, force_nonblock),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(u32,			flags)
		__field(bool,			force_nonblock)
		__field(bool,			sq_thread)

		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->flags		= req->flags;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= req->ctx->flags & IORING_SETUP_SQPOLL;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __get_str(op_str),
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);

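/*
 * Note: sq_thread above is derived from IORING_SETUP_SQPOLL in the ring's
 * flags, so it reports whether the ring uses an SQ polling thread at all,
 * not which thread happened to submit this particular SQE.
 */
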
/*
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @req:	pointer to the armed request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 *
 * Allows tracking which fds are being waited on and what the events of
 * interest are.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(struct io_kiocb *req, int mask, int events),

	TP_ARGS(req, mask, events),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(int,			mask)
		__field(int,			events)

		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->mask		= mask;
		__entry->events		= events;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask, __entry->events)
);

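/*
 * Both mask and events above are poll event bitmasks (EPOLLIN, EPOLLOUT,
 * ...), which is why TP_printk() renders them in hex.
 */
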
/*
 * io_uring_task_add - called after adding a task
 *
 * @req:	pointer to request
 * @mask:	request poll events mask
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(struct io_kiocb *req, int mask),

	TP_ARGS(req, mask),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(int,			mask)

		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= req->cqe.user_data;
		__entry->opcode		= req->opcode;
		__entry->mask		= mask;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask)
);

/*
 * io_uring_req_failed - called when an SQE fails during submission
 *
 * @sqe:	pointer to the io_uring_sqe that failed
 * @req:	pointer to request
 * @error:	error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),

	TP_ARGS(sqe, req, error),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(u8,			flags)
		__field(u8,			ioprio)
		__field(u64,			off)
		__field(u64,			addr)
		__field(u32,			len)
		__field(u32,			op_flags)
		__field(u16,			buf_index)
		__field(u16,			personality)
		__field(u32,			file_index)
		__field(u64,			pad1)
		__field(u64,			addr3)
		__field(int,			error)

		__string(op_str, io_uring_get_opcode(sqe->opcode))
	),

	TP_fast_assign(
		__entry->ctx		= req->ctx;
		__entry->req		= req;
		__entry->user_data	= sqe->user_data;
		__entry->opcode		= sqe->opcode;
		__entry->flags		= sqe->flags;
		__entry->ioprio		= sqe->ioprio;
		__entry->off		= sqe->off;
		__entry->addr		= sqe->addr;
		__entry->len		= sqe->len;
		__entry->op_flags	= sqe->poll32_events;
		__entry->buf_index	= sqe->buf_index;
		__entry->personality	= sqe->personality;
		__entry->file_index	= sqe->file_index;
		__entry->pad1		= sqe->__pad2[0];
		__entry->addr3		= sqe->addr3;
		__entry->error		= error;

		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
		  "error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long) __entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long) __entry->pad1,
		  (unsigned long long) __entry->addr3, __entry->error)
);

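/*
 * Note: op_flags is filled from sqe->poll32_events above because that is
 * one 32-bit member of the union in struct io_uring_sqe that overlays all
 * of the per-opcode flag words, so the raw flags are captured whatever the
 * opcode happens to be (TP_printk() labels them rw_flags).
 */
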
/*
 * io_uring_cqe_overflow - a CQE overflowed
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @res:	CQE result
 * @cflags:	CQE flags
 * @ocqe:	pointer to the overflow CQE (if available)
 */
TRACE_EVENT(io_uring_cqe_overflow,

	TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
		 void *ocqe),

	TP_ARGS(ctx, user_data, res, cflags, ocqe),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(unsigned long long,	user_data)
		__field(s32,			res)
		__field(u32,			cflags)
		__field(void *,			ocqe)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->ocqe		= ocqe;
	),

	TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
		  "overflow_cqe %p",
		  __entry->ctx, __entry->user_data, __entry->res,
		  __entry->cflags, __entry->ocqe)
);

/*
 * io_uring_task_work_run - ran task work
 *
 * @tctx:	pointer to an io_uring_task
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 */
TRACE_EVENT(io_uring_task_work_run,

	TP_PROTO(void *tctx, unsigned int count, unsigned int loops),

	TP_ARGS(tctx, count, loops),

	TP_STRUCT__entry (
		__field(void *,		tctx)
		__field(unsigned int,	count)
		__field(unsigned int,	loops)
	),

	TP_fast_assign(
		__entry->tctx		= tctx;
		__entry->count		= count;
		__entry->loops		= loops;
	),

	TP_printk("tctx %p, count %u, loops %u",
		  __entry->tctx, __entry->count, __entry->loops)
);

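/*
 * io_uring_short_write - called on a short write
 *
 * (Kernel-doc inferred from the event fields below; description assumed.)
 *
 * @ctx:	pointer to a ring context structure
 * @fpos:	file position the write started at
 * @wanted:	number of bytes the request wanted to write
 * @got:	number of bytes actually written
 */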
TRACE_EVENT(io_uring_short_write,

	TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),

	TP_ARGS(ctx, fpos, wanted, got),

	TP_STRUCT__entry(
		__field(void *,	ctx)
		__field(u64,	fpos)
		__field(u64,	wanted)
		__field(u64,	got)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fpos	= fpos;
		__entry->wanted	= wanted;
		__entry->got	= got;
	),

	TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
		  __entry->ctx, __entry->fpos,
		  __entry->wanted, __entry->got)
);

/*
 * io_uring_local_work_run - ran ring local task work
 *
 * @ctx:	pointer to an io_ring_ctx
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 */
TRACE_EVENT(io_uring_local_work_run,

	TP_PROTO(void *ctx, int count, unsigned int loops),

	TP_ARGS(ctx, count, loops),

	TP_STRUCT__entry (
		__field(void *,		ctx)
		__field(int,		count)
		__field(unsigned int,	loops)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->count	= count;
		__entry->loops	= loops;
	),

	TP_printk("ring %p, count %d, loops %u",
		  __entry->ctx, __entry->count, __entry->loops)
);

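/*
 * Recording sketch (standard trace-cmd usage, not specific to this file):
 * all of the events above can be captured together and inspected offline:
 *
 *	trace-cmd record -e 'io_uring:*'
 *	trace-cmd report
 */
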
#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>