v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu  %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u32,		nr_keys			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		__entry->nr_keys	= keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
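
Editor's note (not part of the header): the header only declares events; it emits nothing by itself. A minimal sketch of how the driver side ties in, assuming the standard kernel tracepoint conventions. The function name example_submit() is hypothetical; trace_bcache_request_start() is the accessor generated from the DEFINE_EVENT(bcache_request, bcache_request_start, ...) above.

/* Exactly one translation unit in the driver defines CREATE_TRACE_POINTS
 * before including the header, turning the declarations above into real
 * tracepoint definitions (in-tree this is drivers/md/bcache/trace.c).
 */
#define CREATE_TRACE_POINTS
#include <trace/events/bcache.h>

/* Hypothetical call site, for illustration only.  Each event named
 * bcache_foo yields a trace_bcache_foo() function that compiles to a
 * near no-op while the event is disabled.
 */
static void example_submit(struct bcache_device *d, struct bio *bio)
{
	trace_bcache_request_start(d, bio);	/* fires the event above */
	/* ... hand the bio to the cache lookup path ... */
}

Once loaded, the events appear under /sys/kernel/tracing/events/bcache/ and can be toggled per event, or for the whole bcache subsystem, through the usual ftrace interface.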
v4.10.11
 
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu  %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
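
Editor's note (not part of the header): the visible deltas between the two versions are the SPDX tag added in the newer file, bcache_journal_write growing from a plain bcache_bio event into a dedicated TRACE_EVENT that also records the number of keys written, and the device-number lookup. In v4.10.11 the tracepoints read bio->bi_bdev->bd_dev directly; by v5.4 struct bio no longer carries a block_device pointer, so they go through the bio_dev() helper, which resolves the dev_t from the bio's gendisk. A minimal sketch of the equivalence under v5.4 headers; the wrapper name is ours, not the kernel's:

#include <linux/bio.h>	/* struct bio, bio_dev() */

/* Hypothetical wrapper: on v5.4 this returns the same dev_t that the
 * v4.10.11 tracepoints obtained via bio->bi_bdev->bd_dev before the
 * block layer dropped bi_bdev from struct bio.
 */
static inline dev_t trace_bio_devt(struct bio *bio)
{
	return bio_dev(bio);
}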