/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

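/*
 * RWBS_LEN sizes the rwbs[] strings recorded below: blk_fill_rwbs()
 * writes one operation letter (e.g. R read, W write, D discard, N none),
 * optional modifier letters such as F (flush/FUA), A (readahead),
 * S (sync) and M (metadata), and a terminating NUL.
 */
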
DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry (
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( size_t, size )
	),

	TP_fast_assign(
		__entry->dev = bh->b_bdev->bd_dev;
		__entry->sector = bh->b_blocknr;
		__entry->size = bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

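/*
 * Example (an illustrative sketch, not part of this header): a module
 * can attach a probe to any event defined in this file. The probe
 * receives the event's TP_PROTO arguments after a leading void *data
 * cookie; my_touch_probe is a hypothetical name.
 *
 *	static void my_touch_probe(void *data, struct buffer_head *bh)
 *	{
 *		pr_info("touched block %llu, size %zu\n",
 *			(unsigned long long)bh->b_blocknr, bh->b_size);
 *	}
 *
 *	ret = register_trace_block_touch_buffer(my_touch_probe, NULL);
 *	...
 *	unregister_trace_block_touch_buffer(my_touch_probe, NULL);
 */
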
/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into its
 * request queue. For some reason the request was not completed and
 * needs to be put back in the queue.
 */
TRACE_EVENT(block_rq_requeue,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__array( char, rwbs, RWBS_LEN )
		__dynamic_array( char, cmd, 1 )
	),

	TP_fast_assign(
		__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
		__entry->sector = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, 0)
);

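/*
 * Note: the trailing [%d] above is always 0; it keeps this line in the
 * same shape as the completion events below, which print the error
 * code in that slot.
 */
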
DECLARE_EVENT_CLASS(block_rq_completion,

	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( int, error )
		__array( char, rwbs, RWBS_LEN )
		__dynamic_array( char, cmd, 1 )
	),

	TP_fast_assign(
		__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
		__entry->sector = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;
		__entry->error = blk_status_to_errno(error);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_completion, block_rq_complete,

	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes)
);

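/*
 * Example (an illustrative sketch; my_done_probe is a hypothetical
 * name): a probe for either completion event takes the class's
 * TP_PROTO arguments after the leading void *data cookie.
 *
 *	static void my_done_probe(void *data, struct request *rq,
 *				  blk_status_t error, unsigned int nr_bytes)
 *	{
 *		if (error)
 *			pr_warn("rq failed: errno %d, %u bytes done\n",
 *				blk_status_to_errno(error), nr_bytes);
 *	}
 *
 *	register_trace_block_rq_complete(my_done_probe, NULL);
 */
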
/**
 * block_rq_error - block IO operation error reported by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_error tracepoint event indicates that some portion
 * of the operation request has failed as reported by the device driver.
 */
DEFINE_EVENT(block_rq_completion, block_rq_error,

	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes)
);

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, bytes )
		__array( char, rwbs, RWBS_LEN )
		__array( char, comm, TASK_COMM_LEN )
		__dynamic_array( char, cmd, 1 )
	),

	TP_fast_assign(
		__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
		__entry->sector = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
		__entry->bytes = blk_rq_bytes(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into its queue. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @rq: block IO operation request
 *
 * Called when the block operation request @rq is sent to a device
 * driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_rq_merge - merge request with another one in the elevator
 * @rq: block IO operation request
 *
 * Called when the block operation request @rq is merged with another
 * request queued in the elevator.
 */
DEFINE_EVENT(block_rq, block_rq_merge,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

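/*
 * Usage note: any of these events can be enabled at run time through
 * tracefs, e.g.
 *	echo 1 > /sys/kernel/tracing/events/block/block_rq_issue/enable
 *	cat /sys/kernel/tracing/trace_pipe
 * (On older systems tracefs may be mounted at /sys/kernel/debug/tracing.)
 */
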
/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned, nr_sector )
		__field( int, error )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio_sectors(bio);
		__entry->error = blk_status_to_errno(bio->bi_status);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct bio *bio),

	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__array( char, rwbs, RWBS_LEN )
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio. This
 * occurs when hardware limitations prevent a direct transfer of data
 * between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
DEFINE_EVENT(block_bio, block_bio_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block request.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into a request queue.
 */
DEFINE_EVENT(block_bio, block_bio_queue,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

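/*
 * Example (an illustrative sketch; my_bio_probe is a hypothetical
 * name): probes for the block_bio events receive just the bio.
 *
 *	static void my_bio_probe(void *data, struct bio *bio)
 *	{
 *		pr_info("bio %llu + %u on %u:%u\n",
 *			(unsigned long long)bio->bi_iter.bi_sector,
 *			bio_sectors(bio),
 *			MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
 *	}
 *
 *	register_trace_block_bio_queue(my_bio_probe, NULL);
 */
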
/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @bio: pending block IO operation
 *
 * A request struct has been allocated to handle the block IO operation @bio.
 */
DEFINE_EVENT(block_bio, block_getrq,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int, nr_rq )
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);

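/*
 * Note: @explicit is not recorded in the entry above; it only
 * distinguishes a deliberate unplug (e.g. blk_finish_plug()) from one
 * triggered when the task schedules out.
 */
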
/**
 * block_split - split a single bio struct into two bio structs
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio needs to be split into two bio requests. The newly
 * created @bio request starts at @new_sector. This split may be required
 * due to hardware limitations such as the operation crossing device
 * boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct bio *bio, unsigned int new_sector),

	TP_ARGS(bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( sector_t, new_sector )
		__array( char, rwbs, RWBS_LEN )
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->new_sector = new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @bio: revised operation
 * @dev: original device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct bio *bio, dev_t dev, sector_t from),

	TP_ARGS(bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio_sectors(bio);
		__entry->old_dev = dev;
		__entry->old_sector = from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

/**
 * block_rq_remap - map request for a block operation request
 * @rq: block IO operation request
 * @dev: original device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq has been remapped. @rq holds the
 * current information and @from holds the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request *rq, dev_t dev, sector_t from),

	TP_ARGS(rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__field( unsigned int, nr_bios )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = disk_devt(rq->q->disk);
		__entry->sector = blk_rq_pos(rq);
		__entry->nr_sector = blk_rq_sectors(rq);
		__entry->old_dev = dev;
		__entry->old_sector = from;
		__entry->nr_bios = blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

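/*
 * Example rendered line for block_rq_remap, per the TP_printk format
 * above (illustrative values only):
 *	253,0 W 2097152 + 8 <- (8,16) 1048576 1
 */
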
#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>