v6.2
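This is the bcache tracepoint header (include/trace/events/bcache.h in the mainline tree), shown first as it stands in v6.2 and then, for comparison, as it stood in v3.15. It declares the trace events fired by the bcache caching layer in drivers/md/bcache; since TRACE_SYSTEM is defined as bcache, these events are grouped under the bcache subsystem in tracefs on kernels built with tracing support.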
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);
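
DECLARE_EVENT_CLASS() defines the record layout and output format once, and each DEFINE_EVENT() stamps out a concrete event sharing that class; for every event, the tracepoint machinery also generates a trace_<event>() static inline for callers. A minimal sketch of how the two request events might be fired (the wrapper function here is hypothetical; the real call sites live in drivers/md/bcache/request.c):

	/* Illustration only: trace_bcache_request_start()/_end() are the
	 * inlines generated by the DEFINE_EVENT() lines above. Each call is
	 * a cheap static branch that records nothing unless the event has
	 * been enabled at runtime.
	 */
	static void run_cached_request(struct bcache_device *d, struct bio *bio)
	{
		trace_bcache_request_start(d, bio);
		/* ... look up the cache and submit the bio ... */
		trace_bcache_request_end(d, bio);
	}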

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
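
TRACE_EVENT(), used here and below, is the single-event shorthand documented in include/linux/tracepoint.h: it expands to an event class plus one event of the same name, so bcache_read above is equivalent to:

	/* What TRACE_EVENT(name, proto, args, tstruct, assign, print) expands to: */
	DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print);
	DEFINE_EVENT(name, name, proto, args);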

TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu  %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u32,		nr_keys			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		__entry->nr_keys	= keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		__entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);
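
In both allocator events above, offset is the bucket index shifted left by the cache set's bucket_bits (log2 of the bucket size in sectors), i.e. the bucket's starting position on the cache device in 512-byte sectors, which is why the TP_printk() formats label it sector=.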

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
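
At runtime these events behave like any other trace events: under /sys/kernel/tracing, writing 1 to events/bcache/<event>/enable (or to events/bcache/enable for the whole subsystem) turns them on, and the lines formatted by the TP_printk() strings above can then be read from the trace and trace_pipe files. The v3.15 version of the same header follows for comparison.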
v3.15
 
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

TRACE_EVENT(bcache_write,
	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
	TP_ARGS(bio, writeback, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>