v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

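/*
 * Note on the rwbs[] fields below: blk_fill_rwbs() renders an operation as
 * a short string, an opcode letter (such as R read, W write, D discard,
 * F flush, N none) followed by modifier letters (such as F for FUA, A for
 * readahead, S for sync, M for metadata), e.g. "WS" for a synchronous
 * write. RWBS_LEN bounds the resulting string, including its NUL.
 */
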
DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry (
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  size_t,	size			)
	),

	TP_fast_assign(
		__entry->dev		= bh->b_bdev->bd_dev;
		__entry->sector		= bh->b_blocknr;
		__entry->size		= bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);
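
/*
 * Illustrative example (not part of this header): including it provides
 * register_trace_<event>() helpers whose probes take a void *data context
 * ahead of the TP_PROTO arguments. A minimal sketch for
 * block_touch_buffer (note_touch is a made-up name; out-of-tree modules
 * additionally need the tracepoint exported via
 * EXPORT_TRACEPOINT_SYMBOL*()):
 *
 *	static void note_touch(void *data, struct buffer_head *bh)
 *	{
 *		pr_info("touched block %llu (%zu bytes)\n",
 *			(unsigned long long)bh->b_blocknr, bh->b_size);
 *	}
 *
 *	register_trace_block_touch_buffer(note_touch, NULL);
 *	...
 *	unregister_trace_block_touch_buffer(note_touch, NULL);
 */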

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q.  For some reason the request was not completed and needs to be
 * put back in the queue.
 */
TRACE_EVENT(block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, 0)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  int,		error			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;
		__entry->error     = error;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  unsigned int,	bytes			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,         comm,   TASK_COMM_LEN   )
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
		__entry->bytes     = blk_rq_bytes(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		__get_str(cmd)[0] = '\0';
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q.  The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device.  Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= error;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);
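
/*
 * Illustrative example (not part of this header): a probe for
 * block_bio_complete mirrors its TP_PROTO. A minimal sketch that logs
 * failed bios (note_bio_done is a made-up name):
 *
 *	static void note_bio_done(void *data, struct request_queue *q,
 *				  struct bio *bio, int error)
 *	{
 *		if (error)
 *			pr_warn("bio %d:%d sector %llu failed: %d\n",
 *				MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
 *				(unsigned long long)bio->bi_iter.bi_sector,
 *				error);
 *	}
 *
 *	register_trace_block_bio_complete(note_bio_done, NULL);
 */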

DECLARE_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
        ),

	TP_fast_assign(
		__entry->dev		= bio ? bio_dev(bio) : 0;
		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_opf : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available.  This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);
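
/*
 * Note: block_getrq and block_sleeprq describe the request-allocation
 * path. block_sleeprq has no counterpart in the v5.14.15 version of this
 * header below; it was removed in later kernels as unused.
 */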

/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q.  Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);
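
/*
 * Illustrative example (not part of this header): block_plug and
 * block_unplug fire around batched submission. A caller plugs on the
 * stack, submits a batch, then unplugs (bio1/bio2 are placeholders):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);	// requests may be held in the plug list
 *	submit_bio(bio2);
 *	blk_finish_plug(&plug);	// flushes the list; block_unplug reports depth
 */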

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev				)
		__field( sector_t,	sector				)
		__field( sector_t,	new_sector			)
		__array( char,		rwbs,		RWBS_LEN	)
		__array( char,		comm,		TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped.  The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__field( unsigned int,	nr_bios		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		__entry->nr_bios	= blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry (
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  size_t,	size			)
	),

	TP_fast_assign(
		__entry->dev		= bh->b_bdev->bd_dev;
		__entry->sector		= bh->b_blocknr;
		__entry->size		= bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into the
 * queue.  For some reason the request was not completed and needs to
 * be put back in the queue.
 */
TRACE_EVENT(block_rq_requeue,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, 0)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  int,		error			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;
		__entry->error     = error;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  unsigned int,	bytes			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,         comm,   TASK_COMM_LEN   )
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
		__entry->bytes     = blk_rq_bytes(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into the queue.  The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @rq: block IO operation request
 *
 * Called when block operation request @rq is sent to a device driver
 * for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_rq_merge - merge request with another one in the elevator
 * @rq: block IO operation request
 *
 * Called when block operation request @rq is merged into another
 * request queued in the elevator.
 */
DEFINE_EVENT(block_rq, block_rq_merge,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);
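
/*
 * Illustrative example (not part of this header): in this version the
 * block_rq events carry only a struct request *, so a probe needs just
 * one payload argument (note_merge is a made-up name, and module use
 * still requires the tracepoint to be exported):
 *
 *	static void note_merge(void *data, struct request *rq)
 *	{
 *		pr_info("merged request at sector %llu\n",
 *			(unsigned long long)blk_rq_pos(rq));
 *	}
 *
 *	register_trace_block_rq_merge(note_merge, NULL);
 */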

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= blk_status_to_errno(bio->bi_status);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct bio *bio),

	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device.  Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
DEFINE_EVENT(block_bio, block_bio_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block request.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into the queue.
 */
DEFINE_EVENT(block_bio, block_bio_queue,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
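
/*
 * Illustrative example (not part of this header): the per-bio events are
 * now instances of the single block_bio class, so one probe shape serves
 * them all. A sketch for block_bio_queue (note_bio_queue is a made-up
 * name):
 *
 *	static void note_bio_queue(void *data, struct bio *bio)
 *	{
 *		pr_info("%d:%d queued %u sectors at %llu\n",
 *			MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
 *			bio_sectors(bio),
 *			(unsigned long long)bio->bi_iter.bi_sector);
 *	}
 *
 *	register_trace_block_bio_queue(note_bio_queue, NULL);
 */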

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @bio: pending block IO operation (can be %NULL)
 *
 * A request struct has been allocated to handle the block IO operation @bio.
 */
DEFINE_EVENT(block_bio, block_getrq,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q.  Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);

/**
 * block_split - split a single bio struct into two bio structs
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio needs to be split into two bio requests.  The newly
 * created @bio request starts at @new_sector. This split may be required due to
 * hardware limitations such as an operation crossing device boundaries in a
 * RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct bio *bio, unsigned int new_sector),

	TP_ARGS(bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev				)
		__field( sector_t,	sector				)
		__field( sector_t,	new_sector			)
		__array( char,		rwbs,		RWBS_LEN	)
		__array( char,		comm,		TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @bio: revised operation
 * @dev: original device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct bio *bio, dev_t dev, sector_t from),

	TP_ARGS(bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

/**
 * block_rq_remap - map request for a block operation request
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq has been remapped.  The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request *rq, dev_t dev, sector_t from),

	TP_ARGS(rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__field( unsigned int,	nr_bios		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		__entry->nr_bios	= blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>