Loading...
Note: File does not exist in v4.10.11.
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM io_uring
4
5#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_IO_URING_H
7
8#include <linux/tracepoint.h>
9#include <uapi/linux/io_uring.h>
10#include <linux/io_uring_types.h>
11#include <linux/io_uring.h>
12
13struct io_wq_work;
14
15/**
16 * io_uring_create - called after a new io_uring context was prepared
17 *
18 * @fd: corresponding file descriptor
19 * @ctx: pointer to a ring context structure
20 * @sq_entries: actual SQ size
21 * @cq_entries: actual CQ size
22 * @flags: SQ ring flags, provided to io_uring_setup(2)
23 *
24 * Allows to trace io_uring creation and provide pointer to a context, that can
25 * be used later to find correlated events.
26 */
27TRACE_EVENT(io_uring_create,
28
29 TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
30
31 TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
32
33 TP_STRUCT__entry (
34 __field( int, fd )
35 __field( void *, ctx )
36 __field( u32, sq_entries )
37 __field( u32, cq_entries )
38 __field( u32, flags )
39 ),
40
41 TP_fast_assign(
42 __entry->fd = fd;
43 __entry->ctx = ctx;
44 __entry->sq_entries = sq_entries;
45 __entry->cq_entries = cq_entries;
46 __entry->flags = flags;
47 ),
48
49 TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
50 __entry->ctx, __entry->fd, __entry->sq_entries,
51 __entry->cq_entries, __entry->flags)
52);
53
54/**
55 * io_uring_register - called after a buffer/file/eventfd was successfully
56 * registered for a ring
57 *
58 * @ctx: pointer to a ring context structure
59 * @opcode: describes which operation to perform
60 * @nr_user_files: number of registered files
61 * @nr_user_bufs: number of registered buffers
62 * @ret: return code
63 *
64 * Allows to trace fixed files/buffers, that could be registered to
65 * avoid an overhead of getting references to them for every operation. This
66 * event, together with io_uring_file_get, can provide a full picture of how
67 * much overhead one can reduce via fixing.
68 */
69TRACE_EVENT(io_uring_register,
70
71 TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
72 unsigned nr_bufs, long ret),
73
74 TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),
75
76 TP_STRUCT__entry (
77 __field( void *, ctx )
78 __field( unsigned, opcode )
79 __field( unsigned, nr_files)
80 __field( unsigned, nr_bufs )
81 __field( long, ret )
82 ),
83
84 TP_fast_assign(
85 __entry->ctx = ctx;
86 __entry->opcode = opcode;
87 __entry->nr_files = nr_files;
88 __entry->nr_bufs = nr_bufs;
89 __entry->ret = ret;
90 ),
91
92 TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
93 "ret %ld",
94 __entry->ctx, __entry->opcode, __entry->nr_files,
95 __entry->nr_bufs, __entry->ret)
96);
97
98/**
99 * io_uring_file_get - called before getting references to an SQE file
100 *
101 * @req: pointer to a submitted request
102 * @fd: SQE file descriptor
103 *
104 * Allows to trace out how often an SQE file reference is obtained, which can
105 * help figuring out if it makes sense to use fixed files, or check that fixed
106 * files are used correctly.
107 */
108TRACE_EVENT(io_uring_file_get,
109
110 TP_PROTO(struct io_kiocb *req, int fd),
111
112 TP_ARGS(req, fd),
113
114 TP_STRUCT__entry (
115 __field( void *, ctx )
116 __field( void *, req )
117 __field( u64, user_data )
118 __field( int, fd )
119 ),
120
121 TP_fast_assign(
122 __entry->ctx = req->ctx;
123 __entry->req = req;
124 __entry->user_data = req->cqe.user_data;
125 __entry->fd = fd;
126 ),
127
128 TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
129 __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
130);
131
132/**
133 * io_uring_queue_async_work - called before submitting a new async work
134 *
135 * @req: pointer to a submitted request
136 * @rw: type of workqueue, hashed or normal
137 *
138 * Allows to trace asynchronous work submission.
139 */
140TRACE_EVENT(io_uring_queue_async_work,
141
142 TP_PROTO(struct io_kiocb *req, int rw),
143
144 TP_ARGS(req, rw),
145
146 TP_STRUCT__entry (
147 __field( void *, ctx )
148 __field( void *, req )
149 __field( u64, user_data )
150 __field( u8, opcode )
151 __field( unsigned long long, flags )
152 __field( struct io_wq_work *, work )
153 __field( int, rw )
154
155 __string( op_str, io_uring_get_opcode(req->opcode) )
156 ),
157
158 TP_fast_assign(
159 __entry->ctx = req->ctx;
160 __entry->req = req;
161 __entry->user_data = req->cqe.user_data;
162 __entry->flags = (__force unsigned long long) req->flags;
163 __entry->opcode = req->opcode;
164 __entry->work = &req->work;
165 __entry->rw = rw;
166
167 __assign_str(op_str);
168 ),
169
170 TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
171 __entry->ctx, __entry->req, __entry->user_data,
172 __get_str(op_str), __entry->flags,
173 __entry->rw ? "hashed" : "normal", __entry->work)
174);
175
176/**
177 * io_uring_defer - called when an io_uring request is deferred
178 *
179 * @req: pointer to a deferred request
180 *
181 * Allows to track deferred requests, to get an insight about what requests are
182 * not started immediately.
183 */
184TRACE_EVENT(io_uring_defer,
185
186 TP_PROTO(struct io_kiocb *req),
187
188 TP_ARGS(req),
189
190 TP_STRUCT__entry (
191 __field( void *, ctx )
192 __field( void *, req )
193 __field( unsigned long long, data )
194 __field( u8, opcode )
195
196 __string( op_str, io_uring_get_opcode(req->opcode) )
197 ),
198
199 TP_fast_assign(
200 __entry->ctx = req->ctx;
201 __entry->req = req;
202 __entry->data = req->cqe.user_data;
203 __entry->opcode = req->opcode;
204
205 __assign_str(op_str);
206 ),
207
208 TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
209 __entry->ctx, __entry->req, __entry->data,
210 __get_str(op_str))
211);
212
213/**
214 * io_uring_link - called before the io_uring request added into link_list of
215 * another request
216 *
217 * @req: pointer to a linked request
218 * @target_req: pointer to a previous request, that would contain @req
219 *
220 * Allows to track linked requests, to understand dependencies between requests
221 * and how does it influence their execution flow.
222 */
223TRACE_EVENT(io_uring_link,
224
225 TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),
226
227 TP_ARGS(req, target_req),
228
229 TP_STRUCT__entry (
230 __field( void *, ctx )
231 __field( void *, req )
232 __field( void *, target_req )
233 ),
234
235 TP_fast_assign(
236 __entry->ctx = req->ctx;
237 __entry->req = req;
238 __entry->target_req = target_req;
239 ),
240
241 TP_printk("ring %p, request %p linked after %p",
242 __entry->ctx, __entry->req, __entry->target_req)
243);
244
245/**
246 * io_uring_cqring_wait - called before start waiting for an available CQE
247 *
248 * @ctx: pointer to a ring context structure
249 * @min_events: minimal number of events to wait for
250 *
251 * Allows to track waiting for CQE, so that we can e.g. troubleshoot
252 * situations, when an application wants to wait for an event, that never
253 * comes.
254 */
255TRACE_EVENT(io_uring_cqring_wait,
256
257 TP_PROTO(void *ctx, int min_events),
258
259 TP_ARGS(ctx, min_events),
260
261 TP_STRUCT__entry (
262 __field( void *, ctx )
263 __field( int, min_events )
264 ),
265
266 TP_fast_assign(
267 __entry->ctx = ctx;
268 __entry->min_events = min_events;
269 ),
270
271 TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
272);
273
274/**
275 * io_uring_fail_link - called before failing a linked request
276 *
277 * @req: request, which links were cancelled
278 * @link: cancelled link
279 *
280 * Allows to track linked requests cancellation, to see not only that some work
281 * was cancelled, but also which request was the reason.
282 */
283TRACE_EVENT(io_uring_fail_link,
284
285 TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),
286
287 TP_ARGS(req, link),
288
289 TP_STRUCT__entry (
290 __field( void *, ctx )
291 __field( void *, req )
292 __field( unsigned long long, user_data )
293 __field( u8, opcode )
294 __field( void *, link )
295
296 __string( op_str, io_uring_get_opcode(req->opcode) )
297 ),
298
299 TP_fast_assign(
300 __entry->ctx = req->ctx;
301 __entry->req = req;
302 __entry->user_data = req->cqe.user_data;
303 __entry->opcode = req->opcode;
304 __entry->link = link;
305
306 __assign_str(op_str);
307 ),
308
309 TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
310 __entry->ctx, __entry->req, __entry->user_data,
311 __get_str(op_str), __entry->link)
312);
313
314/**
315 * io_uring_complete - called when completing an SQE
316 *
317 * @ctx: pointer to a ring context structure
318 * @req: (optional) pointer to a submitted request
319 * @cqe: pointer to the filled in CQE being posted
320 */
321TRACE_EVENT(io_uring_complete,
322
323TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
324
325 TP_ARGS(ctx, req, cqe),
326
327 TP_STRUCT__entry (
328 __field( void *, ctx )
329 __field( void *, req )
330 __field( u64, user_data )
331 __field( int, res )
332 __field( unsigned, cflags )
333 __field( u64, extra1 )
334 __field( u64, extra2 )
335 ),
336
337 TP_fast_assign(
338 __entry->ctx = ctx;
339 __entry->req = req;
340 __entry->user_data = cqe->user_data;
341 __entry->res = cqe->res;
342 __entry->cflags = cqe->flags;
343 __entry->extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
344 __entry->extra2 = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;
345 ),
346
347 TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
348 "extra1 %llu extra2 %llu ",
349 __entry->ctx, __entry->req,
350 __entry->user_data,
351 __entry->res, __entry->cflags,
352 (unsigned long long) __entry->extra1,
353 (unsigned long long) __entry->extra2)
354);
355
356/**
357 * io_uring_submit_req - called before submitting a request
358 *
359 * @req: pointer to a submitted request
360 *
361 * Allows to track SQE submitting, to understand what was the source of it, SQ
362 * thread or io_uring_enter call.
363 */
364TRACE_EVENT(io_uring_submit_req,
365
366 TP_PROTO(struct io_kiocb *req),
367
368 TP_ARGS(req),
369
370 TP_STRUCT__entry (
371 __field( void *, ctx )
372 __field( void *, req )
373 __field( unsigned long long, user_data )
374 __field( u8, opcode )
375 __field( unsigned long long, flags )
376 __field( bool, sq_thread )
377
378 __string( op_str, io_uring_get_opcode(req->opcode) )
379 ),
380
381 TP_fast_assign(
382 __entry->ctx = req->ctx;
383 __entry->req = req;
384 __entry->user_data = req->cqe.user_data;
385 __entry->opcode = req->opcode;
386 __entry->flags = (__force unsigned long long) req->flags;
387 __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
388
389 __assign_str(op_str);
390 ),
391
392 TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, "
393 "sq_thread %d", __entry->ctx, __entry->req,
394 __entry->user_data, __get_str(op_str), __entry->flags,
395 __entry->sq_thread)
396);
397
398/*
399 * io_uring_poll_arm - called after arming a poll wait if successful
400 *
401 * @req: pointer to the armed request
402 * @mask: request poll events mask
403 * @events: registered events of interest
404 *
405 * Allows to track which fds are waiting for and what are the events of
406 * interest.
407 */
408TRACE_EVENT(io_uring_poll_arm,
409
410 TP_PROTO(struct io_kiocb *req, int mask, int events),
411
412 TP_ARGS(req, mask, events),
413
414 TP_STRUCT__entry (
415 __field( void *, ctx )
416 __field( void *, req )
417 __field( unsigned long long, user_data )
418 __field( u8, opcode )
419 __field( int, mask )
420 __field( int, events )
421
422 __string( op_str, io_uring_get_opcode(req->opcode) )
423 ),
424
425 TP_fast_assign(
426 __entry->ctx = req->ctx;
427 __entry->req = req;
428 __entry->user_data = req->cqe.user_data;
429 __entry->opcode = req->opcode;
430 __entry->mask = mask;
431 __entry->events = events;
432
433 __assign_str(op_str);
434 ),
435
436 TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
437 __entry->ctx, __entry->req, __entry->user_data,
438 __get_str(op_str),
439 __entry->mask, __entry->events)
440);
441
442/*
443 * io_uring_task_add - called after adding a task
444 *
445 * @req: pointer to request
446 * @mask: request poll events mask
447 *
448 */
449TRACE_EVENT(io_uring_task_add,
450
451 TP_PROTO(struct io_kiocb *req, int mask),
452
453 TP_ARGS(req, mask),
454
455 TP_STRUCT__entry (
456 __field( void *, ctx )
457 __field( void *, req )
458 __field( unsigned long long, user_data )
459 __field( u8, opcode )
460 __field( int, mask )
461
462 __string( op_str, io_uring_get_opcode(req->opcode) )
463 ),
464
465 TP_fast_assign(
466 __entry->ctx = req->ctx;
467 __entry->req = req;
468 __entry->user_data = req->cqe.user_data;
469 __entry->opcode = req->opcode;
470 __entry->mask = mask;
471
472 __assign_str(op_str);
473 ),
474
475 TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
476 __entry->ctx, __entry->req, __entry->user_data,
477 __get_str(op_str),
478 __entry->mask)
479);
480
481/*
482 * io_uring_req_failed - called when an sqe is errored dring submission
483 *
484 * @sqe: pointer to the io_uring_sqe that failed
485 * @req: pointer to request
486 * @error: error it failed with
487 *
488 * Allows easier diagnosing of malformed requests in production systems.
489 */
490TRACE_EVENT(io_uring_req_failed,
491
492 TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
493
494 TP_ARGS(sqe, req, error),
495
496 TP_STRUCT__entry (
497 __field( void *, ctx )
498 __field( void *, req )
499 __field( unsigned long long, user_data )
500 __field( u8, opcode )
501 __field( u8, flags )
502 __field( u8, ioprio )
503 __field( u64, off )
504 __field( u64, addr )
505 __field( u32, len )
506 __field( u32, op_flags )
507 __field( u16, buf_index )
508 __field( u16, personality )
509 __field( u32, file_index )
510 __field( u64, pad1 )
511 __field( u64, addr3 )
512 __field( int, error )
513
514 __string( op_str, io_uring_get_opcode(sqe->opcode) )
515 ),
516
517 TP_fast_assign(
518 __entry->ctx = req->ctx;
519 __entry->req = req;
520 __entry->user_data = sqe->user_data;
521 __entry->opcode = sqe->opcode;
522 __entry->flags = sqe->flags;
523 __entry->ioprio = sqe->ioprio;
524 __entry->off = sqe->off;
525 __entry->addr = sqe->addr;
526 __entry->len = sqe->len;
527 __entry->op_flags = sqe->poll32_events;
528 __entry->buf_index = sqe->buf_index;
529 __entry->personality = sqe->personality;
530 __entry->file_index = sqe->file_index;
531 __entry->pad1 = sqe->__pad2[0];
532 __entry->addr3 = sqe->addr3;
533 __entry->error = error;
534
535 __assign_str(op_str);
536 ),
537
538 TP_printk("ring %p, req %p, user_data 0x%llx, "
539 "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
540 "len=%u, rw_flags=0x%x, buf_index=%d, "
541 "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
542 "error=%d",
543 __entry->ctx, __entry->req, __entry->user_data,
544 __get_str(op_str),
545 __entry->flags, __entry->ioprio,
546 (unsigned long long)__entry->off,
547 (unsigned long long) __entry->addr, __entry->len,
548 __entry->op_flags,
549 __entry->buf_index, __entry->personality, __entry->file_index,
550 (unsigned long long) __entry->pad1,
551 (unsigned long long) __entry->addr3, __entry->error)
552);
553
554
555/*
556 * io_uring_cqe_overflow - a CQE overflowed
557 *
558 * @ctx: pointer to a ring context structure
559 * @user_data: user data associated with the request
560 * @res: CQE result
561 * @cflags: CQE flags
562 * @ocqe: pointer to the overflow cqe (if available)
563 *
564 */
565TRACE_EVENT(io_uring_cqe_overflow,
566
567 TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
568 void *ocqe),
569
570 TP_ARGS(ctx, user_data, res, cflags, ocqe),
571
572 TP_STRUCT__entry (
573 __field( void *, ctx )
574 __field( unsigned long long, user_data )
575 __field( s32, res )
576 __field( u32, cflags )
577 __field( void *, ocqe )
578 ),
579
580 TP_fast_assign(
581 __entry->ctx = ctx;
582 __entry->user_data = user_data;
583 __entry->res = res;
584 __entry->cflags = cflags;
585 __entry->ocqe = ocqe;
586 ),
587
588 TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
589 "overflow_cqe %p",
590 __entry->ctx, __entry->user_data, __entry->res,
591 __entry->cflags, __entry->ocqe)
592);
593
594/*
595 * io_uring_task_work_run - ran task work
596 *
597 * @tctx: pointer to a io_uring_task
598 * @count: how many functions it ran
599 *
600 */
601TRACE_EVENT(io_uring_task_work_run,
602
603 TP_PROTO(void *tctx, unsigned int count),
604
605 TP_ARGS(tctx, count),
606
607 TP_STRUCT__entry (
608 __field( void *, tctx )
609 __field( unsigned int, count )
610 ),
611
612 TP_fast_assign(
613 __entry->tctx = tctx;
614 __entry->count = count;
615 ),
616
617 TP_printk("tctx %p, count %u", __entry->tctx, __entry->count)
618);
619
620TRACE_EVENT(io_uring_short_write,
621
622 TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),
623
624 TP_ARGS(ctx, fpos, wanted, got),
625
626 TP_STRUCT__entry(
627 __field(void *, ctx)
628 __field(u64, fpos)
629 __field(u64, wanted)
630 __field(u64, got)
631 ),
632
633 TP_fast_assign(
634 __entry->ctx = ctx;
635 __entry->fpos = fpos;
636 __entry->wanted = wanted;
637 __entry->got = got;
638 ),
639
640 TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
641 __entry->ctx, __entry->fpos,
642 __entry->wanted, __entry->got)
643);
644
645/*
646 * io_uring_local_work_run - ran ring local task work
647 *
648 * @tctx: pointer to a io_uring_ctx
649 * @count: how many functions it ran
650 * @loops: how many loops it ran
651 *
652 */
653TRACE_EVENT(io_uring_local_work_run,
654
655 TP_PROTO(void *ctx, int count, unsigned int loops),
656
657 TP_ARGS(ctx, count, loops),
658
659 TP_STRUCT__entry (
660 __field(void *, ctx )
661 __field(int, count )
662 __field(unsigned int, loops )
663 ),
664
665 TP_fast_assign(
666 __entry->ctx = ctx;
667 __entry->count = count;
668 __entry->loops = loops;
669 ),
670
671 TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
672);
673
674#endif /* _TRACE_IO_URING_H */
675
676/* This part must be outside protection */
677#include <trace/define_trace.h>