v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#undef TRACE_SYSTEM
  3#define TRACE_SYSTEM block
  4
  5#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
  6#define _TRACE_BLOCK_H
  7
  8#include <linux/blktrace_api.h>
  9#include <linux/blkdev.h>
 10#include <linux/buffer_head.h>
 11#include <linux/tracepoint.h>
 12#include <uapi/linux/ioprio.h>
 13
 14#define RWBS_LEN	8
 15
 16#define IOPRIO_CLASS_STRINGS \
 17	{ IOPRIO_CLASS_NONE,	"none" }, \
 18	{ IOPRIO_CLASS_RT,	"rt" }, \
 19	{ IOPRIO_CLASS_BE,	"be" }, \
 20	{ IOPRIO_CLASS_IDLE,	"idle" }, \
 21	{ IOPRIO_CLASS_INVALID,	"invalid"}
 22
 23#ifdef CONFIG_BUFFER_HEAD
 24DECLARE_EVENT_CLASS(block_buffer,
 25
 26	TP_PROTO(struct buffer_head *bh),
 27
 28	TP_ARGS(bh),
 29
 30	TP_STRUCT__entry (
 31		__field(  dev_t,	dev			)
 32		__field(  sector_t,	sector			)
 33		__field(  size_t,	size			)
 34	),
 35
 36	TP_fast_assign(
 37		__entry->dev		= bh->b_bdev->bd_dev;
 38		__entry->sector		= bh->b_blocknr;
 39		__entry->size		= bh->b_size;
 40	),
 41
 42	TP_printk("%d,%d sector=%llu size=%zu",
 43		MAJOR(__entry->dev), MINOR(__entry->dev),
 44		(unsigned long long)__entry->sector, __entry->size
 45	)
 46);
 47
 48/**
 49 * block_touch_buffer - mark a buffer accessed
 50 * @bh: buffer_head being touched
 51 *
 52 * Called from touch_buffer().
 53 */
 54DEFINE_EVENT(block_buffer, block_touch_buffer,
 55
 56	TP_PROTO(struct buffer_head *bh),
 57
 58	TP_ARGS(bh)
 59);
 60
 61/**
 62 * block_dirty_buffer - mark a buffer dirty
 63 * @bh: buffer_head being dirtied
 64 *
 65 * Called from mark_buffer_dirty().
 66 */
 67DEFINE_EVENT(block_buffer, block_dirty_buffer,
 68
 69	TP_PROTO(struct buffer_head *bh),
 70
 71	TP_ARGS(bh)
 72);
 73#endif /* CONFIG_BUFFER_HEAD */
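
Each DEFINE_EVENT() above generates a trace_<name>() call that the buffer-cache code fires when the event occurs. A minimal sketch of a call site, modelled on touch_buffer() in fs/buffer.c (the b_folio accessor assumes a 6.x buffer_head layout; illustrative only):

#include <linux/buffer_head.h>
#include <trace/events/block.h>

/* Illustrative caller: the tracepoint compiles to a near no-op unless
 * the block_touch_buffer event has been enabled at runtime. */
void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
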
 74
 75/**
 76 * block_rq_requeue - place block IO request back on a queue
 77 * @rq: block IO operation request
 78 *
 79 * The block operation request @rq is being placed back into queue
 80 * @q.  For some reason the request was not completed and needs to be
 81 * put back in the queue.
 82 */
 83TRACE_EVENT(block_rq_requeue,
 84
 85	TP_PROTO(struct request *rq),
 86
 87	TP_ARGS(rq),
 88
 89	TP_STRUCT__entry(
 90		__field(  dev_t,	dev			)
 91		__field(  sector_t,	sector			)
 92		__field(  unsigned int,	nr_sector		)
 93		__field(  unsigned short, ioprio		)
 94		__array(  char,		rwbs,	RWBS_LEN	)
 95		__dynamic_array( char,	cmd,	1		)
 96	),
 97
 98	TP_fast_assign(
 99		__entry->dev	   = rq->q->disk ? disk_devt(rq->q->disk) : 0;
100		__entry->sector    = blk_rq_trace_sector(rq);
101		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
102		__entry->ioprio    = req_get_ioprio(rq);
103
104		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
105		__get_str(cmd)[0] = '\0';
106	),
107
108	TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
109		  MAJOR(__entry->dev), MINOR(__entry->dev),
110		  __entry->rwbs, __get_str(cmd),
111		  (unsigned long long)__entry->sector, __entry->nr_sector,
112		  __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
113				   IOPRIO_CLASS_STRINGS),
114		  IOPRIO_PRIO_HINT(__entry->ioprio),
115		  IOPRIO_PRIO_LEVEL(__entry->ioprio),  0)
116);
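
The trailing "%s,%u,%u" in the format string is the recorded ioprio decoded into priority class, hint and level via __print_symbolic() and the IOPRIO_PRIO_*() helpers. A small userspace sketch of the same decoding, assuming the uapi <linux/ioprio.h> from a kernel new enough to provide IOPRIO_PRIO_HINT() and IOPRIO_PRIO_LEVEL():

#include <stdio.h>
#include <linux/ioprio.h>

/* Mirror of the IOPRIO_CLASS_STRINGS table used by the tracepoint. */
static const char *ioprio_class_name(unsigned int class)
{
	switch (class) {
	case IOPRIO_CLASS_NONE: return "none";
	case IOPRIO_CLASS_RT:   return "rt";
	case IOPRIO_CLASS_BE:   return "be";
	case IOPRIO_CLASS_IDLE: return "idle";
	default:                return "invalid";
	}
}

int main(void)
{
	unsigned short ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	/* Prints "be,0,4", matching the tracepoint's class,hint,level order. */
	printf("%s,%u,%u\n",
	       ioprio_class_name(IOPRIO_PRIO_CLASS(ioprio)),
	       IOPRIO_PRIO_HINT(ioprio),
	       IOPRIO_PRIO_LEVEL(ioprio));
	return 0;
}
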
117
118DECLARE_EVENT_CLASS(block_rq_completion,
119
120	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
121
122	TP_ARGS(rq, error, nr_bytes),
123
124	TP_STRUCT__entry(
125		__field(  dev_t,	dev			)
126		__field(  sector_t,	sector			)
127		__field(  unsigned int,	nr_sector		)
128		__field(  int	,	error			)
129		__field(  unsigned short, ioprio		)
130		__array(  char,		rwbs,	RWBS_LEN	)
131		__dynamic_array( char,	cmd,	1		)
132	),
133
134	TP_fast_assign(
135		__entry->dev	   = rq->q->disk ? disk_devt(rq->q->disk) : 0;
136		__entry->sector    = blk_rq_pos(rq);
137		__entry->nr_sector = nr_bytes >> 9;
138		__entry->error     = blk_status_to_errno(error);
139		__entry->ioprio    = req_get_ioprio(rq);
140
141		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
142		__get_str(cmd)[0] = '\0';
143	),
144
145	TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
146		  MAJOR(__entry->dev), MINOR(__entry->dev),
147		  __entry->rwbs, __get_str(cmd),
148		  (unsigned long long)__entry->sector, __entry->nr_sector,
149		  __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
150				   IOPRIO_CLASS_STRINGS),
151		  IOPRIO_PRIO_HINT(__entry->ioprio),
152		  IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error)
153);
154
155/**
156 * block_rq_complete - block IO operation completed by device driver
157 * @rq: block operations request
158 * @error: status code
159 * @nr_bytes: number of completed bytes
160 *
161 * The block_rq_complete tracepoint event indicates that some portion
162 * of the operation request has been completed by the device driver.  If
163 * the @rq->bio is %NULL, then there is absolutely no additional work to
164 * do for the request. If @rq->bio is non-NULL then there is
165 * additional work required to complete the request.
166 */
167DEFINE_EVENT(block_rq_completion, block_rq_complete,
168
169	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
170
171	TP_ARGS(rq, error, nr_bytes)
172);
173
174/**
175 * block_rq_error - block IO operation error reported by device driver
176 * @rq: block operations request
177 * @error: status code
178 * @nr_bytes: number of completed bytes
179 *
180 * The block_rq_error tracepoint event indicates that some portion
181 * of the operation request has failed as reported by the device driver.
182 */
183DEFINE_EVENT(block_rq_completion, block_rq_error,
184
185	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
186
187	TP_ARGS(rq, error, nr_bytes)
188);
189
190DECLARE_EVENT_CLASS(block_rq,
191
192	TP_PROTO(struct request *rq),
193
194	TP_ARGS(rq),
195
196	TP_STRUCT__entry(
197		__field(  dev_t,	dev			)
198		__field(  sector_t,	sector			)
199		__field(  unsigned int,	nr_sector		)
200		__field(  unsigned int,	bytes			)
201		__field(  unsigned short, ioprio		)
202		__array(  char,		rwbs,	RWBS_LEN	)
203		__array(  char,         comm,   TASK_COMM_LEN   )
204		__dynamic_array( char,	cmd,	1		)
205	),
206
207	TP_fast_assign(
208		__entry->dev	   = rq->q->disk ? disk_devt(rq->q->disk) : 0;
209		__entry->sector    = blk_rq_trace_sector(rq);
210		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
211		__entry->bytes     = blk_rq_bytes(rq);
212		__entry->ioprio	   = req_get_ioprio(rq);
213
214		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
215		__get_str(cmd)[0] = '\0';
216		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
217	),
218
219	TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]",
220		  MAJOR(__entry->dev), MINOR(__entry->dev),
221		  __entry->rwbs, __entry->bytes, __get_str(cmd),
222		  (unsigned long long)__entry->sector, __entry->nr_sector,
223		  __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
224				   IOPRIO_CLASS_STRINGS),
225		  IOPRIO_PRIO_HINT(__entry->ioprio),
226		  IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm)
227);
228
229/**
230 * block_rq_insert - insert block operation request into queue
231 * @rq: block IO operation request
232 *
233 * Called immediately before block operation request @rq is inserted
234 * into queue @q.  The fields in the operation request @rq struct can
235 * be examined to determine which device and sectors the pending
236 * operation would access.
237 */
238DEFINE_EVENT(block_rq, block_rq_insert,
239
240	TP_PROTO(struct request *rq),
241
242	TP_ARGS(rq)
243);
244
245/**
246 * block_rq_issue - issue pending block IO request operation to device driver
247 * @rq: block IO operation request
248 *
249 * Called when block operation request @rq from queue @q is sent to a
250 * device driver for processing.
251 */
252DEFINE_EVENT(block_rq, block_rq_issue,
253
254	TP_PROTO(struct request *rq),
255
256	TP_ARGS(rq)
257);
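
Because the tracepoint now receives only the request, an out-of-tree probe needs just a (void *data, struct request *rq) callback. A minimal sketch of a module attaching to block_rq_issue, assuming the tracepoint is exported to modules as it is in mainline:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Probe callbacks receive the registration's private data pointer
 * first, followed by the TP_PROTO arguments. */
static void rq_issue_probe(void *data, struct request *rq)
{
	dev_t devt = rq->q->disk ? disk_devt(rq->q->disk) : 0;

	pr_info("issue: dev=%u:%u sector=%llu len=%u\n",
		MAJOR(devt), MINOR(devt),
		(unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
}

static int __init probe_init(void)
{
	return register_trace_block_rq_issue(rq_issue_probe, NULL);
}

static void __exit probe_exit(void)
{
	unregister_trace_block_rq_issue(rq_issue_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");
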
258
259/**
260 * block_rq_merge - merge request with another one in the elevator
261 * @rq: block IO operation request
262 *
263 * Called when block operation request @rq from queue @q is merged to another
264 * request queued in the elevator.
265 */
266DEFINE_EVENT(block_rq, block_rq_merge,
267
268	TP_PROTO(struct request *rq),
269
270	TP_ARGS(rq)
271);
272
273/**
274 * block_io_start - insert a request for execution
275 * @rq: block IO operation request
276 *
277 * Called when block operation request @rq is queued for execution
278 */
279DEFINE_EVENT(block_rq, block_io_start,
280
281	TP_PROTO(struct request *rq),
282
283	TP_ARGS(rq)
284);
285
286/**
287 * block_io_done - block IO operation request completed
288 * @rq: block IO operation request
289 *
290 * Called when block operation request @rq is completed
291 */
292DEFINE_EVENT(block_rq, block_io_done,
293
294	TP_PROTO(struct request *rq),
295
296	TP_ARGS(rq)
297);
298
299/**
300 * block_bio_complete - completed all work on the block operation
301 * @q: queue holding the block operation
302 * @bio: block operation completed
303 *
304 * This tracepoint indicates there is no further work to do on this
305 * block IO operation @bio.
306 */
307TRACE_EVENT(block_bio_complete,
308
309	TP_PROTO(struct request_queue *q, struct bio *bio),
310
311	TP_ARGS(q, bio),
312
313	TP_STRUCT__entry(
314		__field( dev_t,		dev		)
315		__field( sector_t,	sector		)
316		__field( unsigned,	nr_sector	)
317		__field( int,		error		)
318		__array( char,		rwbs,	RWBS_LEN)
319	),
320
321	TP_fast_assign(
322		__entry->dev		= bio_dev(bio);
323		__entry->sector		= bio->bi_iter.bi_sector;
324		__entry->nr_sector	= bio_sectors(bio);
325		__entry->error		= blk_status_to_errno(bio->bi_status);
326		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
327	),
328
329	TP_printk("%d,%d %s %llu + %u [%d]",
330		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
331		  (unsigned long long)__entry->sector,
332		  __entry->nr_sector, __entry->error)
333);
334
335DECLARE_EVENT_CLASS(block_bio,
336
337	TP_PROTO(struct bio *bio),
338
339	TP_ARGS(bio),
340
341	TP_STRUCT__entry(
342		__field( dev_t,		dev			)
343		__field( sector_t,	sector			)
344		__field( unsigned int,	nr_sector		)
345		__array( char,		rwbs,	RWBS_LEN	)
346		__array( char,		comm,	TASK_COMM_LEN	)
347	),
348
349	TP_fast_assign(
350		__entry->dev		= bio_dev(bio);
351		__entry->sector		= bio->bi_iter.bi_sector;
352		__entry->nr_sector	= bio_sectors(bio);
353		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
354		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
355	),
356
357	TP_printk("%d,%d %s %llu + %u [%s]",
358		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
359		  (unsigned long long)__entry->sector,
360		  __entry->nr_sector, __entry->comm)
361);
362
363/**
364 * block_bio_bounce - used bounce buffer when processing block operation
365 * @bio: block operation
366 *
367 * A bounce buffer was used to handle the block operation @bio in @q.
368 * This occurs when hardware limitations prevent a direct transfer of
369 * data between the @bio data memory area and the IO device.  Use of a
370 * bounce buffer requires extra copying of data and decreases
371 * performance.
372 */
373DEFINE_EVENT(block_bio, block_bio_bounce,
374	TP_PROTO(struct bio *bio),
375	TP_ARGS(bio)
376);
377
378/**
379 * block_bio_backmerge - merging block operation to the end of an existing operation
380 * @bio: new block operation to merge
381 *
382 * Merging block request @bio to the end of an existing block request.
383 */
384DEFINE_EVENT(block_bio, block_bio_backmerge,
385	TP_PROTO(struct bio *bio),
386	TP_ARGS(bio)
387);
388
389/**
390 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
391 * @bio: new block operation to merge
392 *
393 * Merging block IO operation @bio to the beginning of an existing block request.
394 */
395DEFINE_EVENT(block_bio, block_bio_frontmerge,
396	TP_PROTO(struct bio *bio),
397	TP_ARGS(bio)
398);
399
400/**
401 * block_bio_queue - putting new block IO operation in queue
402 * @bio: new block operation
403 *
404 * About to place the block IO operation @bio into queue @q.
405 */
406DEFINE_EVENT(block_bio, block_bio_queue,
407	TP_PROTO(struct bio *bio),
408	TP_ARGS(bio)
409);
410
411/**
412 * block_getrq - get a free request entry in queue for block IO operations
413 * @bio: pending block IO operation (can be %NULL)
414 *
415 * A request struct has been allocated to handle the block IO operation @bio.
416 */
417DEFINE_EVENT(block_bio, block_getrq,
418	TP_PROTO(struct bio *bio),
419	TP_ARGS(bio)
420);
421
422/**
423 * block_plug - keep operations requests in request queue
424 * @q: request queue to plug
425 *
426 * Plug the request queue @q.  Do not allow block operation requests
427 * to be sent to the device driver. Instead, accumulate requests in
428 * the queue to improve throughput performance of the block device.
429 */
430TRACE_EVENT(block_plug,
431
432	TP_PROTO(struct request_queue *q),
433
434	TP_ARGS(q),
435
436	TP_STRUCT__entry(
437		__array( char,		comm,	TASK_COMM_LEN	)
438	),
439
440	TP_fast_assign(
441		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
442	),
443
444	TP_printk("[%s]", __entry->comm)
445);
446
447DECLARE_EVENT_CLASS(block_unplug,
448
449	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
450
451	TP_ARGS(q, depth, explicit),
452
453	TP_STRUCT__entry(
454		__field( int,		nr_rq			)
455		__array( char,		comm,	TASK_COMM_LEN	)
456	),
457
458	TP_fast_assign(
459		__entry->nr_rq = depth;
460		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
461	),
462
463	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
464);
465
466/**
467 * block_unplug - release of operations requests in request queue
468 * @q: request queue to unplug
469 * @depth: number of requests just added to the queue
470 * @explicit: whether this was an explicit unplug, or one from schedule()
471 *
472 * Unplug request queue @q because device driver is scheduled to work
473 * on elements in the request queue.
474 */
475DEFINE_EVENT(block_unplug, block_unplug,
476
477	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
478
479	TP_ARGS(q, depth, explicit)
480);
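
block_plug and block_unplug bracket the plugging mechanism: block_plug typically fires when requests start accumulating on a task's plug list, and block_unplug reports the number of requests released when the list is flushed. A hedged sketch of kernel code that would exercise both, using the standard blk_start_plug()/blk_finish_plug() API (submit_batch is an illustrative helper, not a real kernel function):

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Submit a batch of bios under one plug.  While the plug is held,
 * requests queue up on the plug list (block_plug); flushing the plug
 * hands them to the driver and fires block_unplug with the depth. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}
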
481
482/**
483 * block_split - split a single bio struct into two bio structs
484 * @bio: block operation being split
485 * @new_sector: The starting sector for the new bio
486 *
487 * The bio request @bio needs to be split into two bio requests.  The newly
488 * created @bio request starts at @new_sector. This split may be required due to
489 * hardware limitations such as operation crossing device boundaries in a RAID
490 * system.
491 */
492TRACE_EVENT(block_split,
493
494	TP_PROTO(struct bio *bio, unsigned int new_sector),
495
496	TP_ARGS(bio, new_sector),
497
498	TP_STRUCT__entry(
499		__field( dev_t,		dev				)
500		__field( sector_t,	sector				)
501		__field( sector_t,	new_sector			)
502		__array( char,		rwbs,		RWBS_LEN	)
503		__array( char,		comm,		TASK_COMM_LEN	)
504	),
505
506	TP_fast_assign(
507		__entry->dev		= bio_dev(bio);
508		__entry->sector		= bio->bi_iter.bi_sector;
509		__entry->new_sector	= new_sector;
510		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
511		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
512	),
513
514	TP_printk("%d,%d %s %llu / %llu [%s]",
515		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
516		  (unsigned long long)__entry->sector,
517		  (unsigned long long)__entry->new_sector,
518		  __entry->comm)
519);
520
521/**
522 * block_bio_remap - map request for a logical device to the raw device
523 * @bio: revised operation
524 * @dev: original device for the operation
525 * @from: original sector for the operation
526 *
527 * An operation for a logical device has been mapped to the
528 * raw block device.
529 */
530TRACE_EVENT(block_bio_remap,
531
532	TP_PROTO(struct bio *bio, dev_t dev, sector_t from),
533
534	TP_ARGS(bio, dev, from),
535
536	TP_STRUCT__entry(
537		__field( dev_t,		dev		)
538		__field( sector_t,	sector		)
539		__field( unsigned int,	nr_sector	)
540		__field( dev_t,		old_dev		)
541		__field( sector_t,	old_sector	)
542		__array( char,		rwbs,	RWBS_LEN)
543	),
544
545	TP_fast_assign(
546		__entry->dev		= bio_dev(bio);
547		__entry->sector		= bio->bi_iter.bi_sector;
548		__entry->nr_sector	= bio_sectors(bio);
549		__entry->old_dev	= dev;
550		__entry->old_sector	= from;
551		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
552	),
553
554	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
555		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
556		  (unsigned long long)__entry->sector,
557		  __entry->nr_sector,
558		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
559		  (unsigned long long)__entry->old_sector)
560);
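
A stacking driver that redirects a bio to an underlying device is expected to emit block_bio_remap with the original device and sector so tools such as blktrace can follow the IO across layers. A minimal, hypothetical remap helper (my_remap and the offset handling are illustrative assumptions, not taken from any real driver):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Hypothetical stacking-driver helper: retarget @bio at @lower and
 * record where it originally pointed. */
static void my_remap(struct bio *bio, struct block_device *lower,
		     sector_t offset)
{
	dev_t orig_dev = bio_dev(bio);
	sector_t orig_sector = bio->bi_iter.bi_sector;

	bio_set_dev(bio, lower);
	bio->bi_iter.bi_sector += offset;
	trace_block_bio_remap(bio, orig_dev, orig_sector);
}
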
561
562/**
563 * block_rq_remap - map request for a block operation request
564 * @rq: block IO operation request
565 * @dev: device for the operation
566 * @from: original sector for the operation
567 *
568 * The block operation request @rq in @q has been remapped.  The block
569 * operation request @rq holds the current information and @from holds
570 * the original sector.
571 */
572TRACE_EVENT(block_rq_remap,
573
574	TP_PROTO(struct request *rq, dev_t dev, sector_t from),
575
576	TP_ARGS(rq, dev, from),
577
578	TP_STRUCT__entry(
579		__field( dev_t,		dev		)
580		__field( sector_t,	sector		)
581		__field( unsigned int,	nr_sector	)
582		__field( dev_t,		old_dev		)
583		__field( sector_t,	old_sector	)
584		__field( unsigned int,	nr_bios		)
585		__array( char,		rwbs,	RWBS_LEN)
586	),
587
588	TP_fast_assign(
589		__entry->dev		= disk_devt(rq->q->disk);
590		__entry->sector		= blk_rq_pos(rq);
591		__entry->nr_sector	= blk_rq_sectors(rq);
592		__entry->old_dev	= dev;
593		__entry->old_sector	= from;
594		__entry->nr_bios	= blk_rq_count_bios(rq);
595		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
596	),
597
598	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
599		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
600		  (unsigned long long)__entry->sector,
601		  __entry->nr_sector,
602		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
603		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
604);
605
606#endif /* _TRACE_BLOCK_H */
607
608/* This part must be outside protection */
609#include <trace/define_trace.h>
610
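
All of the events above are also reachable from userspace through tracefs without writing any kernel code. A small sketch that enables block_rq_complete and streams the formatted records, assuming tracefs is mounted at the usual /sys/kernel/tracing location:

#include <stdio.h>

#define TRACEFS "/sys/kernel/tracing"

int main(void)
{
	FILE *f;
	char line[4096];

	/* Enable the event, then read the human-readable stream. */
	f = fopen(TRACEFS "/events/block/block_rq_complete/enable", "w");
	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);

	f = fopen(TRACEFS "/trace_pipe", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one TP_printk-formatted record per line */
	fclose(f);
	return 0;
}
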
v4.6
 
  1#undef TRACE_SYSTEM
  2#define TRACE_SYSTEM block
  3
  4#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
  5#define _TRACE_BLOCK_H
  6
  7#include <linux/blktrace_api.h>
  8#include <linux/blkdev.h>
  9#include <linux/buffer_head.h>
 10#include <linux/tracepoint.h>
 11
 12#define RWBS_LEN	8
 13
 14DECLARE_EVENT_CLASS(block_buffer,
 15
 16	TP_PROTO(struct buffer_head *bh),
 17
 18	TP_ARGS(bh),
 19
 20	TP_STRUCT__entry (
 21		__field(  dev_t,	dev			)
 22		__field(  sector_t,	sector			)
 23		__field(  size_t,	size			)
 24	),
 25
 26	TP_fast_assign(
 27		__entry->dev		= bh->b_bdev->bd_dev;
 28		__entry->sector		= bh->b_blocknr;
 29		__entry->size		= bh->b_size;
 30	),
 31
 32	TP_printk("%d,%d sector=%llu size=%zu",
 33		MAJOR(__entry->dev), MINOR(__entry->dev),
 34		(unsigned long long)__entry->sector, __entry->size
 35	)
 36);
 37
 38/**
 39 * block_touch_buffer - mark a buffer accessed
 40 * @bh: buffer_head being touched
 41 *
 42 * Called from touch_buffer().
 43 */
 44DEFINE_EVENT(block_buffer, block_touch_buffer,
 45
 46	TP_PROTO(struct buffer_head *bh),
 47
 48	TP_ARGS(bh)
 49);
 50
 51/**
 52 * block_dirty_buffer - mark a buffer dirty
 53 * @bh: buffer_head being dirtied
 54 *
 55 * Called from mark_buffer_dirty().
 56 */
 57DEFINE_EVENT(block_buffer, block_dirty_buffer,
 58
 59	TP_PROTO(struct buffer_head *bh),
 60
 61	TP_ARGS(bh)
 62);
 63
 64DECLARE_EVENT_CLASS(block_rq_with_error,
 65
 66	TP_PROTO(struct request_queue *q, struct request *rq),
 67
 68	TP_ARGS(q, rq),
 69
 70	TP_STRUCT__entry(
 71		__field(  dev_t,	dev			)
 72		__field(  sector_t,	sector			)
 73		__field(  unsigned int,	nr_sector		)
 74		__field(  int,		errors			)
 75		__array(  char,		rwbs,	RWBS_LEN	)
 76		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
 77	),
 78
 79	TP_fast_assign(
 80		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
 81		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
 82					0 : blk_rq_pos(rq);
 83		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
 84					0 : blk_rq_sectors(rq);
 85		__entry->errors    = rq->errors;
 86
 87		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 88		blk_dump_cmd(__get_str(cmd), rq);
 89	),
 90
 91	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
 92		  MAJOR(__entry->dev), MINOR(__entry->dev),
 93		  __entry->rwbs, __get_str(cmd),
 94		  (unsigned long long)__entry->sector,
 95		  __entry->nr_sector, __entry->errors)
 96);
 97
 98/**
 99 * block_rq_abort - abort block operation request
100 * @q: queue containing the block operation request
101 * @rq: block IO operation request
102 *
103 * Called immediately after pending block IO operation request @rq in
104 * queue @q is aborted. The fields in the operation request @rq
105 * can be examined to determine which device and sectors the pending
106 * operation would access.
107 */
108DEFINE_EVENT(block_rq_with_error, block_rq_abort,
109
110	TP_PROTO(struct request_queue *q, struct request *rq),
111
112	TP_ARGS(q, rq)
113);
114
115/**
116 * block_rq_requeue - place block IO request back on a queue
117 * @q: queue holding operation
118 * @rq: block IO operation request
119 *
120 * The block operation request @rq is being placed back into queue
121 * @q.  For some reason the request was not completed and needs to be
122 * put back in the queue.
123 */
124DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
125
126	TP_PROTO(struct request_queue *q, struct request *rq),
127
128	TP_ARGS(q, rq)
129);
130
131/**
132 * block_rq_complete - block IO operation completed by device driver
133 * @q: queue containing the block operation request
134 * @rq: block operations request
135 * @nr_bytes: number of completed bytes
136 *
137 * The block_rq_complete tracepoint event indicates that some portion
138 * of the operation request has been completed by the device driver.  If
139 * the @rq->bio is %NULL, then there is absolutely no additional work to
140 * do for the request. If @rq->bio is non-NULL then there is
141 * additional work required to complete the request.
142 */
143TRACE_EVENT(block_rq_complete,
144
145	TP_PROTO(struct request_queue *q, struct request *rq,
146		 unsigned int nr_bytes),
147
148	TP_ARGS(q, rq, nr_bytes),
149
150	TP_STRUCT__entry(
151		__field(  dev_t,	dev			)
152		__field(  sector_t,	sector			)
153		__field(  unsigned int,	nr_sector		)
154		__field(  int,		errors			)
155		__array(  char,		rwbs,	RWBS_LEN	)
156		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
157	),
158
159	TP_fast_assign(
160		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
161		__entry->sector    = blk_rq_pos(rq);
162		__entry->nr_sector = nr_bytes >> 9;
163		__entry->errors    = rq->errors;
164
165		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
166		blk_dump_cmd(__get_str(cmd), rq);
167	),
168
169	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
170		  MAJOR(__entry->dev), MINOR(__entry->dev),
171		  __entry->rwbs, __get_str(cmd),
172		  (unsigned long long)__entry->sector,
173		  __entry->nr_sector, __entry->errors)
174);
175
176DECLARE_EVENT_CLASS(block_rq,
177
178	TP_PROTO(struct request_queue *q, struct request *rq),
179
180	TP_ARGS(q, rq),
181
182	TP_STRUCT__entry(
183		__field(  dev_t,	dev			)
184		__field(  sector_t,	sector			)
185		__field(  unsigned int,	nr_sector		)
186		__field(  unsigned int,	bytes			)
187		__array(  char,		rwbs,	RWBS_LEN	)
188		__array(  char,         comm,   TASK_COMM_LEN   )
189		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)
190	),
191
192	TP_fast_assign(
193		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
194		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
195					0 : blk_rq_pos(rq);
196		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
197					0 : blk_rq_sectors(rq);
198		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
199					blk_rq_bytes(rq) : 0;
200
201		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
202		blk_dump_cmd(__get_str(cmd), rq);
203		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
204	),
205
206	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
207		  MAJOR(__entry->dev), MINOR(__entry->dev),
208		  __entry->rwbs, __entry->bytes, __get_str(cmd),
209		  (unsigned long long)__entry->sector,
210		  __entry->nr_sector, __entry->comm)
211);
212
213/**
214 * block_rq_insert - insert block operation request into queue
215 * @q: target queue
216 * @rq: block IO operation request
217 *
218 * Called immediately before block operation request @rq is inserted
219 * into queue @q.  The fields in the operation request @rq struct can
220 * be examined to determine which device and sectors the pending
221 * operation would access.
222 */
223DEFINE_EVENT(block_rq, block_rq_insert,
224
225	TP_PROTO(struct request_queue *q, struct request *rq),
226
227	TP_ARGS(q, rq)
228);
229
230/**
231 * block_rq_issue - issue pending block IO request operation to device driver
232 * @q: queue holding operation
233 * @rq: block IO operation request
234 *
235 * Called when block operation request @rq from queue @q is sent to a
236 * device driver for processing.
237 */
238DEFINE_EVENT(block_rq, block_rq_issue,
239
240	TP_PROTO(struct request_queue *q, struct request *rq),
241
242	TP_ARGS(q, rq)
243);
244
245/**
246 * block_bio_bounce - used bounce buffer when processing block operation
247 * @q: queue holding the block operation
248 * @bio: block operation
249 *
250 * A bounce buffer was used to handle the block operation @bio in @q.
251 * This occurs when hardware limitations prevent a direct transfer of
252 * data between the @bio data memory area and the IO device.  Use of a
253 * bounce buffer requires extra copying of data and decreases
254 * performance.
255 */
256TRACE_EVENT(block_bio_bounce,
257
258	TP_PROTO(struct request_queue *q, struct bio *bio),
259
260	TP_ARGS(q, bio),
261
262	TP_STRUCT__entry(
263		__field( dev_t,		dev			)
264		__field( sector_t,	sector			)
265		__field( unsigned int,	nr_sector		)
266		__array( char,		rwbs,	RWBS_LEN	)
267		__array( char,		comm,	TASK_COMM_LEN	)
268	),
269
270	TP_fast_assign(
271		__entry->dev		= bio->bi_bdev ?
272					  bio->bi_bdev->bd_dev : 0;
273		__entry->sector		= bio->bi_iter.bi_sector;
274		__entry->nr_sector	= bio_sectors(bio);
275		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
276		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
277	),
278
279	TP_printk("%d,%d %s %llu + %u [%s]",
280		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
281		  (unsigned long long)__entry->sector,
282		  __entry->nr_sector, __entry->comm)
283);
284
285/**
286 * block_bio_complete - completed all work on the block operation
287 * @q: queue holding the block operation
288 * @bio: block operation completed
289 * @error: io error value
290 *
291 * This tracepoint indicates there is no further work to do on this
292 * block IO operation @bio.
293 */
294TRACE_EVENT(block_bio_complete,
295
296	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
297
298	TP_ARGS(q, bio, error),
299
300	TP_STRUCT__entry(
301		__field( dev_t,		dev		)
302		__field( sector_t,	sector		)
303		__field( unsigned,	nr_sector	)
304		__field( int,		error		)
305		__array( char,		rwbs,	RWBS_LEN)
306	),
307
308	TP_fast_assign(
309		__entry->dev		= bio->bi_bdev->bd_dev;
310		__entry->sector		= bio->bi_iter.bi_sector;
311		__entry->nr_sector	= bio_sectors(bio);
312		__entry->error		= error;
313		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
314	),
315
316	TP_printk("%d,%d %s %llu + %u [%d]",
317		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
318		  (unsigned long long)__entry->sector,
319		  __entry->nr_sector, __entry->error)
320);
321
322DECLARE_EVENT_CLASS(block_bio_merge,
323
324	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
325
326	TP_ARGS(q, rq, bio),
327
328	TP_STRUCT__entry(
329		__field( dev_t,		dev			)
330		__field( sector_t,	sector			)
331		__field( unsigned int,	nr_sector		)
332		__array( char,		rwbs,	RWBS_LEN	)
333		__array( char,		comm,	TASK_COMM_LEN	)
334	),
335
336	TP_fast_assign(
337		__entry->dev		= bio->bi_bdev->bd_dev;
338		__entry->sector		= bio->bi_iter.bi_sector;
339		__entry->nr_sector	= bio_sectors(bio);
340		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
341		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
342	),
343
344	TP_printk("%d,%d %s %llu + %u [%s]",
345		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
346		  (unsigned long long)__entry->sector,
347		  __entry->nr_sector, __entry->comm)
348);
349
350/**
351 * block_bio_backmerge - merging block operation to the end of an existing operation
352 * @q: queue holding operation
353 * @rq: request bio is being merged into
354 * @bio: new block operation to merge
355 *
356 * Merging block request @bio to the end of an existing block request
357 * in queue @q.
358 */
359DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
360
361	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
362
363	TP_ARGS(q, rq, bio)
364);
365
366/**
367 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
368 * @q: queue holding operation
369 * @rq: request bio is being merged into
370 * @bio: new block operation to merge
371 *
372 * Merging block IO operation @bio to the beginning of an existing block
373 * operation in queue @q.
374 */
375DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
376
377	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
378
379	TP_ARGS(q, rq, bio)
380);
381
382/**
383 * block_bio_queue - putting new block IO operation in queue
384 * @q: queue holding operation
385 * @bio: new block operation
386 *
387 * About to place the block IO operation @bio into queue @q.
388 */
389TRACE_EVENT(block_bio_queue,
390
391	TP_PROTO(struct request_queue *q, struct bio *bio),
392
393	TP_ARGS(q, bio),
394
395	TP_STRUCT__entry(
396		__field( dev_t,		dev			)
397		__field( sector_t,	sector			)
398		__field( unsigned int,	nr_sector		)
399		__array( char,		rwbs,	RWBS_LEN	)
400		__array( char,		comm,	TASK_COMM_LEN	)
401	),
402
403	TP_fast_assign(
404		__entry->dev		= bio->bi_bdev->bd_dev;
405		__entry->sector		= bio->bi_iter.bi_sector;
406		__entry->nr_sector	= bio_sectors(bio);
407		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
408		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
409	),
410
411	TP_printk("%d,%d %s %llu + %u [%s]",
412		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
413		  (unsigned long long)__entry->sector,
414		  __entry->nr_sector, __entry->comm)
415);
416
417DECLARE_EVENT_CLASS(block_get_rq,
418
419	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
420
421	TP_ARGS(q, bio, rw),
422
423	TP_STRUCT__entry(
424		__field( dev_t,		dev			)
425		__field( sector_t,	sector			)
426		__field( unsigned int,	nr_sector		)
427		__array( char,		rwbs,	RWBS_LEN	)
428		__array( char,		comm,	TASK_COMM_LEN	)
429        ),
430
431	TP_fast_assign(
432		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
433		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
434		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
435		blk_fill_rwbs(__entry->rwbs,
436			      bio ? bio->bi_rw : 0, __entry->nr_sector);
437		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
438        ),
439
440	TP_printk("%d,%d %s %llu + %u [%s]",
441		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
442		  (unsigned long long)__entry->sector,
443		  __entry->nr_sector, __entry->comm)
444);
445
446/**
447 * block_getrq - get a free request entry in queue for block IO operations
448 * @q: queue for operations
449 * @bio: pending block IO operation
450 * @rw: low bit indicates a read (%0) or a write (%1)
451 *
452 * A request struct for queue @q has been allocated to handle the
453 * block IO operation @bio.
454 */
455DEFINE_EVENT(block_get_rq, block_getrq,
456
457	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
458
459	TP_ARGS(q, bio, rw)
460);
461
462/**
463 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
464 * @q: queue for operation
465 * @bio: pending block IO operation
466 * @rw: low bit indicates a read (%0) or a write (%1)
467 *
468 * In the case where a request struct cannot be provided for queue @q
469 * the process needs to wait for a request struct to become
470 * available.  This tracepoint event is generated each time the
471 * process goes to sleep waiting for a request struct to become available.
472 */
473DEFINE_EVENT(block_get_rq, block_sleeprq,
474
475	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
476
477	TP_ARGS(q, bio, rw)
478);
479
480/**
481 * block_plug - keep operations requests in request queue
482 * @q: request queue to plug
483 *
484 * Plug the request queue @q.  Do not allow block operation requests
485 * to be sent to the device driver. Instead, accumulate requests in
486 * the queue to improve throughput performance of the block device.
487 */
488TRACE_EVENT(block_plug,
489
490	TP_PROTO(struct request_queue *q),
491
492	TP_ARGS(q),
493
494	TP_STRUCT__entry(
495		__array( char,		comm,	TASK_COMM_LEN	)
496	),
497
498	TP_fast_assign(
499		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
500	),
501
502	TP_printk("[%s]", __entry->comm)
503);
504
505DECLARE_EVENT_CLASS(block_unplug,
506
507	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
508
509	TP_ARGS(q, depth, explicit),
510
511	TP_STRUCT__entry(
512		__field( int,		nr_rq			)
513		__array( char,		comm,	TASK_COMM_LEN	)
514	),
515
516	TP_fast_assign(
517		__entry->nr_rq = depth;
518		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
519	),
520
521	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
522);
523
524/**
525 * block_unplug - release of operations requests in request queue
526 * @q: request queue to unplug
527 * @depth: number of requests just added to the queue
528 * @explicit: whether this was an explicit unplug, or one from schedule()
529 *
530 * Unplug request queue @q because device driver is scheduled to work
531 * on elements in the request queue.
532 */
533DEFINE_EVENT(block_unplug, block_unplug,
534
535	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
536
537	TP_ARGS(q, depth, explicit)
538);
539
540/**
541 * block_split - split a single bio struct into two bio structs
542 * @q: queue containing the bio
543 * @bio: block operation being split
544 * @new_sector: The starting sector for the new bio
545 *
546 * The bio request @bio in request queue @q needs to be split into two
547 * bio requests. The newly created @bio request starts at
548 * @new_sector. This split may be required due to hardware limitations
549 * such as operation crossing device boundaries in a RAID system.
550 */
551TRACE_EVENT(block_split,
552
553	TP_PROTO(struct request_queue *q, struct bio *bio,
554		 unsigned int new_sector),
555
556	TP_ARGS(q, bio, new_sector),
557
558	TP_STRUCT__entry(
559		__field( dev_t,		dev				)
560		__field( sector_t,	sector				)
561		__field( sector_t,	new_sector			)
562		__array( char,		rwbs,		RWBS_LEN	)
563		__array( char,		comm,		TASK_COMM_LEN	)
564	),
565
566	TP_fast_assign(
567		__entry->dev		= bio->bi_bdev->bd_dev;
568		__entry->sector		= bio->bi_iter.bi_sector;
569		__entry->new_sector	= new_sector;
570		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
571		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
572	),
573
574	TP_printk("%d,%d %s %llu / %llu [%s]",
575		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
576		  (unsigned long long)__entry->sector,
577		  (unsigned long long)__entry->new_sector,
578		  __entry->comm)
579);
580
581/**
582 * block_bio_remap - map request for a logical device to the raw device
583 * @q: queue holding the operation
584 * @bio: revised operation
585 * @dev: device for the operation
586 * @from: original sector for the operation
587 *
588 * An operation for a logical device has been mapped to the
589 * raw block device.
590 */
591TRACE_EVENT(block_bio_remap,
592
593	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
594		 sector_t from),
595
596	TP_ARGS(q, bio, dev, from),
597
598	TP_STRUCT__entry(
599		__field( dev_t,		dev		)
600		__field( sector_t,	sector		)
601		__field( unsigned int,	nr_sector	)
602		__field( dev_t,		old_dev		)
603		__field( sector_t,	old_sector	)
604		__array( char,		rwbs,	RWBS_LEN)
605	),
606
607	TP_fast_assign(
608		__entry->dev		= bio->bi_bdev->bd_dev;
609		__entry->sector		= bio->bi_iter.bi_sector;
610		__entry->nr_sector	= bio_sectors(bio);
611		__entry->old_dev	= dev;
612		__entry->old_sector	= from;
613		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
614	),
615
616	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
617		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
618		  (unsigned long long)__entry->sector,
619		  __entry->nr_sector,
620		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
621		  (unsigned long long)__entry->old_sector)
622);
623
624/**
625 * block_rq_remap - map request for a block operation request
626 * @q: queue holding the operation
627 * @rq: block IO operation request
628 * @dev: device for the operation
629 * @from: original sector for the operation
630 *
631 * The block operation request @rq in @q has been remapped.  The block
632 * operation request @rq holds the current information and @from holds
633 * the original sector.
634 */
635TRACE_EVENT(block_rq_remap,
636
637	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
638		 sector_t from),
639
640	TP_ARGS(q, rq, dev, from),
641
642	TP_STRUCT__entry(
643		__field( dev_t,		dev		)
644		__field( sector_t,	sector		)
645		__field( unsigned int,	nr_sector	)
646		__field( dev_t,		old_dev		)
647		__field( sector_t,	old_sector	)
648		__field( unsigned int,	nr_bios		)
649		__array( char,		rwbs,	RWBS_LEN)
650	),
651
652	TP_fast_assign(
653		__entry->dev		= disk_devt(rq->rq_disk);
654		__entry->sector		= blk_rq_pos(rq);
655		__entry->nr_sector	= blk_rq_sectors(rq);
656		__entry->old_dev	= dev;
657		__entry->old_sector	= from;
658		__entry->nr_bios	= blk_rq_count_bios(rq);
659		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
660	),
661
662	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
663		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
664		  (unsigned long long)__entry->sector,
665		  __entry->nr_sector,
666		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
667		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
668);
669
670#endif /* _TRACE_BLOCK_H */
671
672/* This part must be outside protection */
673#include <trace/define_trace.h>
674