/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

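/*
 * Tracepoints for the bcache block-layer cache.
 *
 * DECLARE_EVENT_CLASS() defines a reusable event layout,
 * DEFINE_EVENT() stamps out a named tracepoint from such a class, and
 * TRACE_EVENT() defines a one-off event with its own layout.
 *
 * Events can be enabled at runtime through tracefs, e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/bcache_read/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * The bcache_request class below records a bio submitted to a bcache
 * device together with the original device it was remapped from.
 */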
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(dev_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->orig_major = d->disk->major;
		__entry->orig_minor = d->disk->first_minor;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

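/*
 * The bkey class records a single btree key: the cached device inode it
 * belongs to, its offset and length in 512-byte sectors, and whether
 * the data it points at is dirty (not yet written to the backing
 * device).
 */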
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

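/*
 * The btree_node class identifies a btree node by the number of the
 * cache bucket that holds it.
 */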
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

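/*
 * The bcache_bio class records the device, start sector, length in
 * 512-byte sectors, and rwbs flags (read/write/sync/...) of a bio.
 * The two bypass events below fire when a bio skips the cache, either
 * because it looks sequential or because the cache device is congested.
 */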
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

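/*
 * bcache_read extends the plain bio record with the outcome of the
 * lookup: whether the read hit the cache and whether it bypassed it.
 */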
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

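/*
 * bcache_write records a write to a cache set (identified by UUID and
 * backing-device inode) plus the policy decision made for it: writeback
 * (cache first, flush to the backing device later) or bypass (straight
 * to the backing device).
 */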
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
		__field(u64, inode)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode = inode;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

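/*
 * The cache_set class identifies a cache set by its 16-byte UUID.  The
 * journal events below fire when the journal, or the journal entry
 * currently being filled, runs out of room and writers must wait.
 */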
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

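/*
 * bcache_journal_write records a journal write bio together with the
 * number of keys contained in the journal entry being written.
 */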
TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(u32, nr_keys)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		__entry->nr_keys = keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

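/*
 * bcache_btree_write records a btree node write: the bucket holding the
 * node, the block offset already written within it, and the key count
 * of the set being written out.
 */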
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block = b->written;
		__entry->keys = b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes = nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

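/*
 * bcache_btree_insert_key records where a key was inserted (btree node
 * bucket and level) along with the key itself, the insert operation,
 * and its resulting status.
 */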
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

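/*
 * The btree_split class records the node's bucket and the number of
 * keys it held when it was split or compacted.
 */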
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys = keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

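/*
 * bcache_keyscan reports the result of a key-range scan: how many keys
 * were found between start_inode:start_offset and end_inode:end_offset.
 */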
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32, nr_found)
		__field(__u32, start_inode)
		__field(__u64, start_offset)
		__field(__u32, end_inode)
		__field(__u64, end_offset)
	),

	TP_fast_assign(
		__entry->nr_found = nr_found;
		__entry->start_inode = start_inode;
		__entry->start_offset = start_offset;
		__entry->end_inode = end_inode;
		__entry->end_offset = end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

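/*
 * bcache_invalidate fires when the allocator invalidates a bucket,
 * recording how many sectors of cached data were discarded and where
 * the bucket starts on the cache device.
 */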
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned, sectors)
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
		__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

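/*
 * bcache_alloc records a successful bucket allocation as a device and
 * starting sector on the cache device.
 */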
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

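/*
 * bcache_alloc_fail records a failed allocation from the given reserve,
 * with the occupancy of the free and free_inc FIFOs and the cache set's
 * prio_blocked count at the time of the failure.
 */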
TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->free = fifo_used(&ca->free[reserve]);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->blocked = atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

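/*
 * The writeback events trace keys as dirty data is flushed to the
 * backing device, and collisions where a key changed while its
 * writeback was in flight.
 */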
DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>