/* GFS2 tracepoint definitions */
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM gfs2
3
4#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_GFS2_H
6
7#include <linux/tracepoint.h>
8
9#include <linux/fs.h>
10#include <linux/buffer_head.h>
11#include <linux/dlmconstants.h>
12#include <linux/gfs2_ondisk.h>
13#include <linux/writeback.h>
14#include <linux/ktime.h>
15#include "incore.h"
16#include "glock.h"
17#include "rgrp.h"
18
/* Map a DLM lock mode constant to its short name for trace output */
#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x,		\
	dlm_state_name(IV),				\
	dlm_state_name(NL),				\
	dlm_state_name(CR),				\
	dlm_state_name(CW),				\
	dlm_state_name(PR),				\
	dlm_state_name(PW),				\
	dlm_state_name(EX))

/* Resource group block states, as stored in the bitmaps */
#define block_state_name(x) __print_symbolic(x,		\
	{ GFS2_BLKST_FREE,	"free" },		\
	{ GFS2_BLKST_USED,	"used" },		\
	{ GFS2_BLKST_DINODE,	"dinode" },		\
	{ GFS2_BLKST_UNLINKED,	"unlinked" })

/* Multi-block reservation trace operations */
#define TRACE_RS_DELETE  0
#define TRACE_RS_TREEDEL 1
#define TRACE_RS_INSERT  2
#define TRACE_RS_CLAIM   3

/* Use the TRACE_RS_* constants rather than bare magic numbers */
#define rs_func_name(x) __print_symbolic(x,		\
	{ TRACE_RS_DELETE,	"del " },		\
	{ TRACE_RS_TREEDEL,	"tdel" },		\
	{ TRACE_RS_INSERT,	"ins " },		\
	{ TRACE_RS_CLAIM,	"clm " })

/* One-letter mnemonic per glock flag, matching the glocks debugfs file */
#define show_glock_flags(flags) __print_flags(flags, "",	\
	{(1UL << GLF_LOCK),			"l" },		\
	{(1UL << GLF_DEMOTE),			"D" },		\
	{(1UL << GLF_PENDING_DEMOTE),		"d" },		\
	{(1UL << GLF_DEMOTE_IN_PROGRESS),	"p" },		\
	{(1UL << GLF_DIRTY),			"y" },		\
	{(1UL << GLF_LFLUSH),			"f" },		\
	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
	{(1UL << GLF_REPLY_PENDING),		"r" },		\
	{(1UL << GLF_INITIAL),			"I" },		\
	{(1UL << GLF_FROZEN),			"F" },		\
	{(1UL << GLF_QUEUED),			"q" },		\
	{(1UL << GLF_LRU),			"L" },		\
	{(1UL << GLF_OBJECT),			"o" },		\
	{(1UL << GLF_BLOCKING),			"b" })

62#ifndef NUMPTY
63#define NUMPTY
64static inline u8 glock_trace_state(unsigned int state)
65{
66 switch(state) {
67 case LM_ST_SHARED:
68 return DLM_LOCK_PR;
69 case LM_ST_DEFERRED:
70 return DLM_LOCK_CW;
71 case LM_ST_EXCLUSIVE:
72 return DLM_LOCK_EX;
73 }
74 return DLM_LOCK_NL;
75}
76#endif
77
/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */

88/* General glock state change (DLM lock request completes) */
89TRACE_EVENT(gfs2_glock_state_change,
90
91 TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
92
93 TP_ARGS(gl, new_state),
94
95 TP_STRUCT__entry(
96 __field( dev_t, dev )
97 __field( u64, glnum )
98 __field( u32, gltype )
99 __field( u8, cur_state )
100 __field( u8, new_state )
101 __field( u8, dmt_state )
102 __field( u8, tgt_state )
103 __field( unsigned long, flags )
104 ),
105
106 TP_fast_assign(
107 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
108 __entry->glnum = gl->gl_name.ln_number;
109 __entry->gltype = gl->gl_name.ln_type;
110 __entry->cur_state = glock_trace_state(gl->gl_state);
111 __entry->new_state = glock_trace_state(new_state);
112 __entry->tgt_state = glock_trace_state(gl->gl_target);
113 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
114 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
115 ),
116
117 TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
118 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
119 (unsigned long long)__entry->glnum,
120 glock_trace_name(__entry->cur_state),
121 glock_trace_name(__entry->new_state),
122 glock_trace_name(__entry->tgt_state),
123 glock_trace_name(__entry->dmt_state),
124 show_glock_flags(__entry->flags))
125);
126
127/* State change -> unlocked, glock is being deallocated */
128TRACE_EVENT(gfs2_glock_put,
129
130 TP_PROTO(const struct gfs2_glock *gl),
131
132 TP_ARGS(gl),
133
134 TP_STRUCT__entry(
135 __field( dev_t, dev )
136 __field( u64, glnum )
137 __field( u32, gltype )
138 __field( u8, cur_state )
139 __field( unsigned long, flags )
140 ),
141
142 TP_fast_assign(
143 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
144 __entry->gltype = gl->gl_name.ln_type;
145 __entry->glnum = gl->gl_name.ln_number;
146 __entry->cur_state = glock_trace_state(gl->gl_state);
147 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
148 ),
149
150 TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
151 MAJOR(__entry->dev), MINOR(__entry->dev),
152 __entry->gltype, (unsigned long long)__entry->glnum,
153 glock_trace_name(__entry->cur_state),
154 glock_trace_name(DLM_LOCK_IV),
155 show_glock_flags(__entry->flags))
156
157);
158
159/* Callback (local or remote) requesting lock demotion */
160TRACE_EVENT(gfs2_demote_rq,
161
162 TP_PROTO(const struct gfs2_glock *gl, bool remote),
163
164 TP_ARGS(gl, remote),
165
166 TP_STRUCT__entry(
167 __field( dev_t, dev )
168 __field( u64, glnum )
169 __field( u32, gltype )
170 __field( u8, cur_state )
171 __field( u8, dmt_state )
172 __field( unsigned long, flags )
173 __field( bool, remote )
174 ),
175
176 TP_fast_assign(
177 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
178 __entry->gltype = gl->gl_name.ln_type;
179 __entry->glnum = gl->gl_name.ln_number;
180 __entry->cur_state = glock_trace_state(gl->gl_state);
181 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
182 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
183 __entry->remote = remote;
184 ),
185
186 TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s %s",
187 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
188 (unsigned long long)__entry->glnum,
189 glock_trace_name(__entry->cur_state),
190 glock_trace_name(__entry->dmt_state),
191 show_glock_flags(__entry->flags),
192 __entry->remote ? "remote" : "local")
193
194);
195
196/* Promotion/grant of a glock */
197TRACE_EVENT(gfs2_promote,
198
199 TP_PROTO(const struct gfs2_holder *gh, int first),
200
201 TP_ARGS(gh, first),
202
203 TP_STRUCT__entry(
204 __field( dev_t, dev )
205 __field( u64, glnum )
206 __field( u32, gltype )
207 __field( int, first )
208 __field( u8, state )
209 ),
210
211 TP_fast_assign(
212 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
213 __entry->glnum = gh->gh_gl->gl_name.ln_number;
214 __entry->gltype = gh->gh_gl->gl_name.ln_type;
215 __entry->first = first;
216 __entry->state = glock_trace_state(gh->gh_state);
217 ),
218
219 TP_printk("%u,%u glock %u:%llu promote %s %s",
220 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
221 (unsigned long long)__entry->glnum,
222 __entry->first ? "first": "other",
223 glock_trace_name(__entry->state))
224);
225
226/* Queue/dequeue a lock request */
227TRACE_EVENT(gfs2_glock_queue,
228
229 TP_PROTO(const struct gfs2_holder *gh, int queue),
230
231 TP_ARGS(gh, queue),
232
233 TP_STRUCT__entry(
234 __field( dev_t, dev )
235 __field( u64, glnum )
236 __field( u32, gltype )
237 __field( int, queue )
238 __field( u8, state )
239 ),
240
241 TP_fast_assign(
242 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
243 __entry->glnum = gh->gh_gl->gl_name.ln_number;
244 __entry->gltype = gh->gh_gl->gl_name.ln_type;
245 __entry->queue = queue;
246 __entry->state = glock_trace_state(gh->gh_state);
247 ),
248
249 TP_printk("%u,%u glock %u:%llu %squeue %s",
250 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
251 (unsigned long long)__entry->glnum,
252 __entry->queue ? "" : "de",
253 glock_trace_name(__entry->state))
254);
255
256/* DLM sends a reply to GFS2 */
257TRACE_EVENT(gfs2_glock_lock_time,
258
259 TP_PROTO(const struct gfs2_glock *gl, s64 tdiff),
260
261 TP_ARGS(gl, tdiff),
262
263 TP_STRUCT__entry(
264 __field( dev_t, dev )
265 __field( u64, glnum )
266 __field( u32, gltype )
267 __field( int, status )
268 __field( char, flags )
269 __field( s64, tdiff )
270 __field( s64, srtt )
271 __field( s64, srttvar )
272 __field( s64, srttb )
273 __field( s64, srttvarb )
274 __field( s64, sirt )
275 __field( s64, sirtvar )
276 __field( s64, dcount )
277 __field( s64, qcount )
278 ),
279
280 TP_fast_assign(
281 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
282 __entry->glnum = gl->gl_name.ln_number;
283 __entry->gltype = gl->gl_name.ln_type;
284 __entry->status = gl->gl_lksb.sb_status;
285 __entry->flags = gl->gl_lksb.sb_flags;
286 __entry->tdiff = tdiff;
287 __entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
288 __entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
289 __entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
290 __entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
291 __entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
292 __entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
293 __entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
294 __entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
295 ),
296
297 TP_printk("%u,%u glock %d:%lld status:%d flags:%02x tdiff:%lld srtt:%lld/%lld srttb:%lld/%lld sirt:%lld/%lld dcnt:%lld qcnt:%lld",
298 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
299 (unsigned long long)__entry->glnum,
300 __entry->status, __entry->flags,
301 (long long)__entry->tdiff,
302 (long long)__entry->srtt,
303 (long long)__entry->srttvar,
304 (long long)__entry->srttb,
305 (long long)__entry->srttvarb,
306 (long long)__entry->sirt,
307 (long long)__entry->sirtvar,
308 (long long)__entry->dcount,
309 (long long)__entry->qcount)
310);
311
/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */

320/* Pin/unpin a block in the log */
321TRACE_EVENT(gfs2_pin,
322
323 TP_PROTO(const struct gfs2_bufdata *bd, int pin),
324
325 TP_ARGS(bd, pin),
326
327 TP_STRUCT__entry(
328 __field( dev_t, dev )
329 __field( int, pin )
330 __field( u32, len )
331 __field( sector_t, block )
332 __field( u64, ino )
333 ),
334
335 TP_fast_assign(
336 __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
337 __entry->pin = pin;
338 __entry->len = bd->bd_bh->b_size;
339 __entry->block = bd->bd_bh->b_blocknr;
340 __entry->ino = bd->bd_gl->gl_name.ln_number;
341 ),
342
343 TP_printk("%u,%u log %s %llu/%lu inode %llu",
344 MAJOR(__entry->dev), MINOR(__entry->dev),
345 __entry->pin ? "pin" : "unpin",
346 (unsigned long long)__entry->block,
347 (unsigned long)__entry->len,
348 (unsigned long long)__entry->ino)
349);
350
351/* Flushing the log */
352TRACE_EVENT(gfs2_log_flush,
353
354 TP_PROTO(const struct gfs2_sbd *sdp, int start),
355
356 TP_ARGS(sdp, start),
357
358 TP_STRUCT__entry(
359 __field( dev_t, dev )
360 __field( int, start )
361 __field( u64, log_seq )
362 ),
363
364 TP_fast_assign(
365 __entry->dev = sdp->sd_vfs->s_dev;
366 __entry->start = start;
367 __entry->log_seq = sdp->sd_log_sequence;
368 ),
369
370 TP_printk("%u,%u log flush %s %llu",
371 MAJOR(__entry->dev), MINOR(__entry->dev),
372 __entry->start ? "start" : "end",
373 (unsigned long long)__entry->log_seq)
374);
375
376/* Reserving/releasing blocks in the log */
377TRACE_EVENT(gfs2_log_blocks,
378
379 TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
380
381 TP_ARGS(sdp, blocks),
382
383 TP_STRUCT__entry(
384 __field( dev_t, dev )
385 __field( int, blocks )
386 ),
387
388 TP_fast_assign(
389 __entry->dev = sdp->sd_vfs->s_dev;
390 __entry->blocks = blocks;
391 ),
392
393 TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
394 MINOR(__entry->dev), __entry->blocks)
395);
396
397/* Writing back the AIL */
398TRACE_EVENT(gfs2_ail_flush,
399
400 TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),
401
402 TP_ARGS(sdp, wbc, start),
403
404 TP_STRUCT__entry(
405 __field( dev_t, dev )
406 __field( int, start )
407 __field( int, sync_mode )
408 __field( long, nr_to_write )
409 ),
410
411 TP_fast_assign(
412 __entry->dev = sdp->sd_vfs->s_dev;
413 __entry->start = start;
414 __entry->sync_mode = wbc->sync_mode;
415 __entry->nr_to_write = wbc->nr_to_write;
416 ),
417
418 TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
419 MINOR(__entry->dev), __entry->start ? "start" : "end",
420 __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
421 __entry->nr_to_write)
422);
423
/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */

432/* Map an extent of blocks, possibly a new allocation */
433TRACE_EVENT(gfs2_bmap,
434
435 TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
436 sector_t lblock, int create, int errno),
437
438 TP_ARGS(ip, bh, lblock, create, errno),
439
440 TP_STRUCT__entry(
441 __field( dev_t, dev )
442 __field( sector_t, lblock )
443 __field( sector_t, pblock )
444 __field( u64, inum )
445 __field( unsigned long, state )
446 __field( u32, len )
447 __field( int, create )
448 __field( int, errno )
449 ),
450
451 TP_fast_assign(
452 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
453 __entry->lblock = lblock;
454 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
455 __entry->inum = ip->i_no_addr;
456 __entry->state = bh->b_state;
457 __entry->len = bh->b_size;
458 __entry->create = create;
459 __entry->errno = errno;
460 ),
461
462 TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
463 MAJOR(__entry->dev), MINOR(__entry->dev),
464 (unsigned long long)__entry->inum,
465 (unsigned long long)__entry->lblock,
466 (unsigned long)__entry->len,
467 (unsigned long long)__entry->pblock,
468 __entry->state, __entry->create ? "create " : "nocreate",
469 __entry->errno)
470);
471
472/* Keep track of blocks as they are allocated/freed */
473TRACE_EVENT(gfs2_block_alloc,
474
475 TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
476 u64 block, unsigned len, u8 block_state),
477
478 TP_ARGS(ip, rgd, block, len, block_state),
479
480 TP_STRUCT__entry(
481 __field( dev_t, dev )
482 __field( u64, start )
483 __field( u64, inum )
484 __field( u32, len )
485 __field( u8, block_state )
486 __field( u64, rd_addr )
487 __field( u32, rd_free_clone )
488 __field( u32, rd_reserved )
489 ),
490
491 TP_fast_assign(
492 __entry->dev = rgd->rd_gl->gl_sbd->sd_vfs->s_dev;
493 __entry->start = block;
494 __entry->inum = ip->i_no_addr;
495 __entry->len = len;
496 __entry->block_state = block_state;
497 __entry->rd_addr = rgd->rd_addr;
498 __entry->rd_free_clone = rgd->rd_free_clone;
499 __entry->rd_reserved = rgd->rd_reserved;
500 ),
501
502 TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
503 MAJOR(__entry->dev), MINOR(__entry->dev),
504 (unsigned long long)__entry->inum,
505 (unsigned long long)__entry->start,
506 (unsigned long)__entry->len,
507 block_state_name(__entry->block_state),
508 (unsigned long long)__entry->rd_addr,
509 __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
510);
511
512/* Keep track of multi-block reservations as they are allocated/freed */
513TRACE_EVENT(gfs2_rs,
514
515 TP_PROTO(const struct gfs2_blkreserv *rs, u8 func),
516
517 TP_ARGS(rs, func),
518
519 TP_STRUCT__entry(
520 __field( dev_t, dev )
521 __field( u64, rd_addr )
522 __field( u32, rd_free_clone )
523 __field( u32, rd_reserved )
524 __field( u64, inum )
525 __field( u64, start )
526 __field( u32, free )
527 __field( u8, func )
528 ),
529
530 TP_fast_assign(
531 __entry->dev = rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
532 __entry->rd_addr = rs->rs_rbm.rgd->rd_addr;
533 __entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone;
534 __entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved;
535 __entry->inum = rs->rs_inum;
536 __entry->start = gfs2_rbm_to_block(&rs->rs_rbm);
537 __entry->free = rs->rs_free;
538 __entry->func = func;
539 ),
540
541 TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
542 MAJOR(__entry->dev), MINOR(__entry->dev),
543 (unsigned long long)__entry->inum,
544 (unsigned long long)__entry->start,
545 (unsigned long long)__entry->rd_addr,
546 (unsigned long)__entry->rd_free_clone,
547 (unsigned long)__entry->rd_reserved,
548 rs_func_name(__entry->func), (unsigned long)__entry->free)
549);
550
551#endif /* _TRACE_GFS2_H */
552
553/* This part must be outside protection */
554#undef TRACE_INCLUDE_PATH
555#define TRACE_INCLUDE_PATH .
556#define TRACE_INCLUDE_FILE trace_gfs2
557#include <trace/define_trace.h>
558
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM gfs2
3
4#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_GFS2_H
6
7#include <linux/tracepoint.h>
8
9#include <linux/fs.h>
10#include <linux/buffer_head.h>
11#include <linux/dlmconstants.h>
12#include <linux/gfs2_ondisk.h>
13#include <linux/writeback.h>
14#include <linux/ktime.h>
15#include "incore.h"
16#include "glock.h"
17
/* Map a DLM lock mode constant to its short name for trace output */
#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x,		\
	dlm_state_name(IV),				\
	dlm_state_name(NL),				\
	dlm_state_name(CR),				\
	dlm_state_name(CW),				\
	dlm_state_name(PR),				\
	dlm_state_name(PW),				\
	dlm_state_name(EX))

/* Resource group block states, as stored in the bitmaps */
#define block_state_name(x) __print_symbolic(x,		\
	{ GFS2_BLKST_FREE,	"free" },		\
	{ GFS2_BLKST_USED,	"used" },		\
	{ GFS2_BLKST_DINODE,	"dinode" },		\
	{ GFS2_BLKST_UNLINKED,	"unlinked" })

/* One-letter mnemonic per glock flag, matching the glocks debugfs file */
#define show_glock_flags(flags) __print_flags(flags, "",	\
	{(1UL << GLF_LOCK),			"l" },		\
	{(1UL << GLF_DEMOTE),			"D" },		\
	{(1UL << GLF_PENDING_DEMOTE),		"d" },		\
	{(1UL << GLF_DEMOTE_IN_PROGRESS),	"p" },		\
	{(1UL << GLF_DIRTY),			"y" },		\
	{(1UL << GLF_LFLUSH),			"f" },		\
	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
	{(1UL << GLF_REPLY_PENDING),		"r" },		\
	{(1UL << GLF_INITIAL),			"I" },		\
	{(1UL << GLF_FROZEN),			"F" },		\
	{(1UL << GLF_QUEUED),			"q" },		\
	{(1UL << GLF_LRU),			"L" },		\
	{(1UL << GLF_OBJECT),			"o" },		\
	{(1UL << GLF_BLOCKING),			"b" })

50#ifndef NUMPTY
51#define NUMPTY
52static inline u8 glock_trace_state(unsigned int state)
53{
54 switch(state) {
55 case LM_ST_SHARED:
56 return DLM_LOCK_PR;
57 case LM_ST_DEFERRED:
58 return DLM_LOCK_CW;
59 case LM_ST_EXCLUSIVE:
60 return DLM_LOCK_EX;
61 }
62 return DLM_LOCK_NL;
63}
64#endif
65
/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */

76/* General glock state change (DLM lock request completes) */
77TRACE_EVENT(gfs2_glock_state_change,
78
79 TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
80
81 TP_ARGS(gl, new_state),
82
83 TP_STRUCT__entry(
84 __field( dev_t, dev )
85 __field( u64, glnum )
86 __field( u32, gltype )
87 __field( u8, cur_state )
88 __field( u8, new_state )
89 __field( u8, dmt_state )
90 __field( u8, tgt_state )
91 __field( unsigned long, flags )
92 ),
93
94 TP_fast_assign(
95 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
96 __entry->glnum = gl->gl_name.ln_number;
97 __entry->gltype = gl->gl_name.ln_type;
98 __entry->cur_state = glock_trace_state(gl->gl_state);
99 __entry->new_state = glock_trace_state(new_state);
100 __entry->tgt_state = glock_trace_state(gl->gl_target);
101 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
102 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
103 ),
104
105 TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
106 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
107 (unsigned long long)__entry->glnum,
108 glock_trace_name(__entry->cur_state),
109 glock_trace_name(__entry->new_state),
110 glock_trace_name(__entry->tgt_state),
111 glock_trace_name(__entry->dmt_state),
112 show_glock_flags(__entry->flags))
113);
114
115/* State change -> unlocked, glock is being deallocated */
116TRACE_EVENT(gfs2_glock_put,
117
118 TP_PROTO(const struct gfs2_glock *gl),
119
120 TP_ARGS(gl),
121
122 TP_STRUCT__entry(
123 __field( dev_t, dev )
124 __field( u64, glnum )
125 __field( u32, gltype )
126 __field( u8, cur_state )
127 __field( unsigned long, flags )
128 ),
129
130 TP_fast_assign(
131 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
132 __entry->gltype = gl->gl_name.ln_type;
133 __entry->glnum = gl->gl_name.ln_number;
134 __entry->cur_state = glock_trace_state(gl->gl_state);
135 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
136 ),
137
138 TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
139 MAJOR(__entry->dev), MINOR(__entry->dev),
140 __entry->gltype, (unsigned long long)__entry->glnum,
141 glock_trace_name(__entry->cur_state),
142 glock_trace_name(DLM_LOCK_IV),
143 show_glock_flags(__entry->flags))
144
145);
146
147/* Callback (local or remote) requesting lock demotion */
148TRACE_EVENT(gfs2_demote_rq,
149
150 TP_PROTO(const struct gfs2_glock *gl),
151
152 TP_ARGS(gl),
153
154 TP_STRUCT__entry(
155 __field( dev_t, dev )
156 __field( u64, glnum )
157 __field( u32, gltype )
158 __field( u8, cur_state )
159 __field( u8, dmt_state )
160 __field( unsigned long, flags )
161 ),
162
163 TP_fast_assign(
164 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
165 __entry->gltype = gl->gl_name.ln_type;
166 __entry->glnum = gl->gl_name.ln_number;
167 __entry->cur_state = glock_trace_state(gl->gl_state);
168 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
169 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
170 ),
171
172 TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
173 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
174 (unsigned long long)__entry->glnum,
175 glock_trace_name(__entry->cur_state),
176 glock_trace_name(__entry->dmt_state),
177 show_glock_flags(__entry->flags))
178
179);
180
181/* Promotion/grant of a glock */
182TRACE_EVENT(gfs2_promote,
183
184 TP_PROTO(const struct gfs2_holder *gh, int first),
185
186 TP_ARGS(gh, first),
187
188 TP_STRUCT__entry(
189 __field( dev_t, dev )
190 __field( u64, glnum )
191 __field( u32, gltype )
192 __field( int, first )
193 __field( u8, state )
194 ),
195
196 TP_fast_assign(
197 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
198 __entry->glnum = gh->gh_gl->gl_name.ln_number;
199 __entry->gltype = gh->gh_gl->gl_name.ln_type;
200 __entry->first = first;
201 __entry->state = glock_trace_state(gh->gh_state);
202 ),
203
204 TP_printk("%u,%u glock %u:%llu promote %s %s",
205 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
206 (unsigned long long)__entry->glnum,
207 __entry->first ? "first": "other",
208 glock_trace_name(__entry->state))
209);
210
211/* Queue/dequeue a lock request */
212TRACE_EVENT(gfs2_glock_queue,
213
214 TP_PROTO(const struct gfs2_holder *gh, int queue),
215
216 TP_ARGS(gh, queue),
217
218 TP_STRUCT__entry(
219 __field( dev_t, dev )
220 __field( u64, glnum )
221 __field( u32, gltype )
222 __field( int, queue )
223 __field( u8, state )
224 ),
225
226 TP_fast_assign(
227 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
228 __entry->glnum = gh->gh_gl->gl_name.ln_number;
229 __entry->gltype = gh->gh_gl->gl_name.ln_type;
230 __entry->queue = queue;
231 __entry->state = glock_trace_state(gh->gh_state);
232 ),
233
234 TP_printk("%u,%u glock %u:%llu %squeue %s",
235 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
236 (unsigned long long)__entry->glnum,
237 __entry->queue ? "" : "de",
238 glock_trace_name(__entry->state))
239);
240
241/* DLM sends a reply to GFS2 */
242TRACE_EVENT(gfs2_glock_lock_time,
243
244 TP_PROTO(const struct gfs2_glock *gl, s64 tdiff),
245
246 TP_ARGS(gl, tdiff),
247
248 TP_STRUCT__entry(
249 __field( dev_t, dev )
250 __field( u64, glnum )
251 __field( u32, gltype )
252 __field( int, status )
253 __field( char, flags )
254 __field( s64, tdiff )
255 __field( s64, srtt )
256 __field( s64, srttvar )
257 __field( s64, srttb )
258 __field( s64, srttvarb )
259 __field( s64, sirt )
260 __field( s64, sirtvar )
261 __field( s64, dcount )
262 __field( s64, qcount )
263 ),
264
265 TP_fast_assign(
266 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
267 __entry->glnum = gl->gl_name.ln_number;
268 __entry->gltype = gl->gl_name.ln_type;
269 __entry->status = gl->gl_lksb.sb_status;
270 __entry->flags = gl->gl_lksb.sb_flags;
271 __entry->tdiff = tdiff;
272 __entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
273 __entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
274 __entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
275 __entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
276 __entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
277 __entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
278 __entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
279 __entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
280 ),
281
282 TP_printk("%u,%u glock %d:%lld status:%d flags:%02x tdiff:%lld srtt:%lld/%lld srttb:%lld/%lld sirt:%lld/%lld dcnt:%lld qcnt:%lld",
283 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
284 (unsigned long long)__entry->glnum,
285 __entry->status, __entry->flags,
286 (long long)__entry->tdiff,
287 (long long)__entry->srtt,
288 (long long)__entry->srttvar,
289 (long long)__entry->srttb,
290 (long long)__entry->srttvarb,
291 (long long)__entry->sirt,
292 (long long)__entry->sirtvar,
293 (long long)__entry->dcount,
294 (long long)__entry->qcount)
295);
296
/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */

305/* Pin/unpin a block in the log */
306TRACE_EVENT(gfs2_pin,
307
308 TP_PROTO(const struct gfs2_bufdata *bd, int pin),
309
310 TP_ARGS(bd, pin),
311
312 TP_STRUCT__entry(
313 __field( dev_t, dev )
314 __field( int, pin )
315 __field( u32, len )
316 __field( sector_t, block )
317 __field( u64, ino )
318 ),
319
320 TP_fast_assign(
321 __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
322 __entry->pin = pin;
323 __entry->len = bd->bd_bh->b_size;
324 __entry->block = bd->bd_bh->b_blocknr;
325 __entry->ino = bd->bd_gl->gl_name.ln_number;
326 ),
327
328 TP_printk("%u,%u log %s %llu/%lu inode %llu",
329 MAJOR(__entry->dev), MINOR(__entry->dev),
330 __entry->pin ? "pin" : "unpin",
331 (unsigned long long)__entry->block,
332 (unsigned long)__entry->len,
333 (unsigned long long)__entry->ino)
334);
335
336/* Flushing the log */
337TRACE_EVENT(gfs2_log_flush,
338
339 TP_PROTO(const struct gfs2_sbd *sdp, int start),
340
341 TP_ARGS(sdp, start),
342
343 TP_STRUCT__entry(
344 __field( dev_t, dev )
345 __field( int, start )
346 __field( u64, log_seq )
347 ),
348
349 TP_fast_assign(
350 __entry->dev = sdp->sd_vfs->s_dev;
351 __entry->start = start;
352 __entry->log_seq = sdp->sd_log_sequence;
353 ),
354
355 TP_printk("%u,%u log flush %s %llu",
356 MAJOR(__entry->dev), MINOR(__entry->dev),
357 __entry->start ? "start" : "end",
358 (unsigned long long)__entry->log_seq)
359);
360
361/* Reserving/releasing blocks in the log */
362TRACE_EVENT(gfs2_log_blocks,
363
364 TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
365
366 TP_ARGS(sdp, blocks),
367
368 TP_STRUCT__entry(
369 __field( dev_t, dev )
370 __field( int, blocks )
371 ),
372
373 TP_fast_assign(
374 __entry->dev = sdp->sd_vfs->s_dev;
375 __entry->blocks = blocks;
376 ),
377
378 TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
379 MINOR(__entry->dev), __entry->blocks)
380);
381
382/* Writing back the AIL */
383TRACE_EVENT(gfs2_ail_flush,
384
385 TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),
386
387 TP_ARGS(sdp, wbc, start),
388
389 TP_STRUCT__entry(
390 __field( dev_t, dev )
391 __field( int, start )
392 __field( int, sync_mode )
393 __field( long, nr_to_write )
394 ),
395
396 TP_fast_assign(
397 __entry->dev = sdp->sd_vfs->s_dev;
398 __entry->start = start;
399 __entry->sync_mode = wbc->sync_mode;
400 __entry->nr_to_write = wbc->nr_to_write;
401 ),
402
403 TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
404 MINOR(__entry->dev), __entry->start ? "start" : "end",
405 __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
406 __entry->nr_to_write)
407);
408
/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */

417/* Map an extent of blocks, possibly a new allocation */
418TRACE_EVENT(gfs2_bmap,
419
420 TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
421 sector_t lblock, int create, int errno),
422
423 TP_ARGS(ip, bh, lblock, create, errno),
424
425 TP_STRUCT__entry(
426 __field( dev_t, dev )
427 __field( sector_t, lblock )
428 __field( sector_t, pblock )
429 __field( u64, inum )
430 __field( unsigned long, state )
431 __field( u32, len )
432 __field( int, create )
433 __field( int, errno )
434 ),
435
436 TP_fast_assign(
437 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
438 __entry->lblock = lblock;
439 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
440 __entry->inum = ip->i_no_addr;
441 __entry->state = bh->b_state;
442 __entry->len = bh->b_size;
443 __entry->create = create;
444 __entry->errno = errno;
445 ),
446
447 TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
448 MAJOR(__entry->dev), MINOR(__entry->dev),
449 (unsigned long long)__entry->inum,
450 (unsigned long long)__entry->lblock,
451 (unsigned long)__entry->len,
452 (unsigned long long)__entry->pblock,
453 __entry->state, __entry->create ? "create " : "nocreate",
454 __entry->errno)
455);
456
457/* Keep track of blocks as they are allocated/freed */
458TRACE_EVENT(gfs2_block_alloc,
459
460 TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
461 u64 block, unsigned len, u8 block_state),
462
463 TP_ARGS(ip, rgd, block, len, block_state),
464
465 TP_STRUCT__entry(
466 __field( dev_t, dev )
467 __field( u64, start )
468 __field( u64, inum )
469 __field( u32, len )
470 __field( u8, block_state )
471 __field( u64, rd_addr )
472 __field( u32, rd_free_clone )
473 ),
474
475 TP_fast_assign(
476 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
477 __entry->start = block;
478 __entry->inum = ip->i_no_addr;
479 __entry->len = len;
480 __entry->block_state = block_state;
481 __entry->rd_addr = rgd->rd_addr;
482 __entry->rd_free_clone = rgd->rd_free_clone;
483 ),
484
485 TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u",
486 MAJOR(__entry->dev), MINOR(__entry->dev),
487 (unsigned long long)__entry->inum,
488 (unsigned long long)__entry->start,
489 (unsigned long)__entry->len,
490 block_state_name(__entry->block_state),
491 (unsigned long long)__entry->rd_addr,
492 __entry->rd_free_clone)
493);
494
495#endif /* _TRACE_GFS2_H */
496
497/* This part must be outside protection */
498#undef TRACE_INCLUDE_PATH
499#define TRACE_INCLUDE_PATH .
500#define TRACE_INCLUDE_FILE trace_gfs2
501#include <trace/define_trace.h>
502