#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include "incore.h"
#include "glock.h"

#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x,		\
			    dlm_state_name(IV),		\
			    dlm_state_name(NL),		\
			    dlm_state_name(CR),		\
			    dlm_state_name(CW),		\
			    dlm_state_name(PR),		\
			    dlm_state_name(PW),		\
			    dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x,			\
			    { GFS2_BLKST_FREE, "free" },	\
			    { GFS2_BLKST_USED, "used" },	\
			    { GFS2_BLKST_DINODE, "dinode" },	\
			    { GFS2_BLKST_UNLINKED, "unlinked" })

#define show_glock_flags(flags) __print_flags(flags, "",	\
	{(1UL << GLF_LOCK),			"l" },		\
	{(1UL << GLF_DEMOTE),			"D" },		\
	{(1UL << GLF_PENDING_DEMOTE),		"d" },		\
	{(1UL << GLF_DEMOTE_IN_PROGRESS),	"p" },		\
	{(1UL << GLF_DIRTY),			"y" },		\
	{(1UL << GLF_LFLUSH),			"f" },		\
	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
	{(1UL << GLF_REPLY_PENDING),		"r" },		\
	{(1UL << GLF_INITIAL),			"I" },		\
	{(1UL << GLF_FROZEN),			"F" },		\
	{(1UL << GLF_QUEUED),			"q" })

#ifndef NUMPTY
#define NUMPTY
static inline u8 glock_trace_state(unsigned int state)
{
	switch (state) {
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	}
	return DLM_LOCK_NL;
}
#endif

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */
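/*
 * Usage note (illustrative, not part of the build): the events defined in
 * this file appear under the "gfs2" subsystem in the tracing directory and
 * can be enabled individually, e.g. (assuming tracefs/debugfs is mounted at
 * the conventional location):
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/gfs2/gfs2_glock_state_change/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Each event line follows the corresponding TP_printk() format below.
 */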
/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,

	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),

	TP_ARGS(gl, new_state),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	u64,	glnum			)
		__field(	u32,	gltype			)
		__field(	u8,	cur_state		)
		__field(	u8,	new_state		)
		__field(	u8,	dmt_state		)
		__field(	u8,	tgt_state		)
		__field(	unsigned long,	flags		)
	),

	TP_fast_assign(
		__entry->dev		= gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum		= gl->gl_name.ln_number;
		__entry->gltype		= gl->gl_name.ln_type;
		__entry->cur_state	= glock_trace_state(gl->gl_state);
		__entry->new_state	= glock_trace_state(new_state);
		__entry->tgt_state	= glock_trace_state(gl->gl_target);
		__entry->dmt_state	= glock_trace_state(gl->gl_demote_state);
		__entry->flags		= gl->gl_flags;
	),

	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->new_state),
		  glock_trace_name(__entry->tgt_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	u64,	glnum			)
		__field(	u32,	gltype			)
		__field(	u8,	cur_state		)
		__field(	unsigned long,	flags		)
	),

	TP_fast_assign(
		__entry->dev		= gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype		= gl->gl_name.ln_type;
		__entry->glnum		= gl->gl_name.ln_number;
		__entry->cur_state	= glock_trace_state(gl->gl_state);
		__entry->flags		= gl->gl_flags;
	),

	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->gltype, (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(DLM_LOCK_IV),
		  show_glock_flags(__entry->flags))

);

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	u64,	glnum			)
		__field(	u32,	gltype			)
		__field(	u8,	cur_state		)
		__field(	u8,	dmt_state		)
		__field(	unsigned long,	flags		)
	),

	TP_fast_assign(
		__entry->dev		= gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype		= gl->gl_name.ln_type;
		__entry->glnum		= gl->gl_name.ln_number;
		__entry->cur_state	= glock_trace_state(gl->gl_state);
		__entry->dmt_state	= glock_trace_state(gl->gl_demote_state);
		__entry->flags		= gl->gl_flags;
	),

	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))

);

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,

	TP_PROTO(const struct gfs2_holder *gh, int first),

	TP_ARGS(gh, first),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	u64,	glnum			)
		__field(	u32,	gltype			)
		__field(	int,	first			)
		__field(	u8,	state			)
	),

	TP_fast_assign(
		__entry->dev	= gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum	= gh->gh_gl->gl_name.ln_number;
		__entry->gltype	= gh->gh_gl->gl_name.ln_type;
		__entry->first	= first;
		__entry->state	= glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu promote %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->first ? "first" : "other",
		  glock_trace_name(__entry->state))
);

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,

	TP_PROTO(const struct gfs2_holder *gh, int queue),

	TP_ARGS(gh, queue),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	u64,	glnum			)
		__field(	u32,	gltype			)
		__field(	int,	queue			)
		__field(	u8,	state			)
	),

	TP_fast_assign(
		__entry->dev	= gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum	= gh->gh_gl->gl_name.ln_number;
		__entry->gltype	= gh->gh_gl->gl_name.ln_type;
		__entry->queue	= queue;
		__entry->state	= glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu %squeue %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->queue ? "" : "de",
		  glock_trace_name(__entry->state))
);
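/*
 * Example (hypothetical values, timestamp and task columns omitted): a lock
 * request followed by a grant might produce a sequence such as:
 *
 *   gfs2_glock_queue:        8,3 glock 2:8388 queue EX
 *   gfs2_glock_state_change: 8,3 glock 2:8388 state NL to EX tgt:EX dmt:NL flags:lI
 *   gfs2_promote:            8,3 glock 2:8388 promote first EX
 *
 * The prefix is MAJOR,MINOR of the filesystem's device followed by the
 * glock type and number; states are printed as DLM lock modes via
 * glock_trace_name() and the flag letters come from show_glock_flags().
 */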
/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */

/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,

	TP_PROTO(const struct gfs2_bufdata *bd, int pin),

	TP_ARGS(bd, pin),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	pin			)
		__field(	u32,	len			)
		__field(	sector_t,	block		)
		__field(	u64,	ino			)
	),

	TP_fast_assign(
		__entry->dev		= bd->bd_gl->gl_sbd->sd_vfs->s_dev;
		__entry->pin		= pin;
		__entry->len		= bd->bd_bh->b_size;
		__entry->block		= bd->bd_bh->b_blocknr;
		__entry->ino		= bd->bd_gl->gl_name.ln_number;
	),

	TP_printk("%u,%u log %s %llu/%lu inode %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->pin ? "pin" : "unpin",
		  (unsigned long long)__entry->block,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->ino)
);

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, int start),

	TP_ARGS(sdp, start),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	start			)
		__field(	u64,	log_seq			)
	),

	TP_fast_assign(
		__entry->dev		= sdp->sd_vfs->s_dev;
		__entry->start		= start;
		__entry->log_seq	= sdp->sd_log_sequence;
	),

	TP_printk("%u,%u log flush %s %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->start ? "start" : "end",
		  (unsigned long long)__entry->log_seq)
);

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,

	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),

	TP_ARGS(sdp, blocks),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	blocks			)
	),

	TP_fast_assign(
		__entry->dev		= sdp->sd_vfs->s_dev;
		__entry->blocks		= blocks;
	),

	TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->blocks)
);
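/*
 * Note: gfs2_log_flush is emitted once with start == 1 when a flush begins
 * and once with start == 0 when it completes, so the "Latency: Log flush
 * time" objective above can be measured by pairing each "start" line with
 * the following "end" line for the same device.
 */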
"start" : "end", 297 (unsigned long long)__entry->log_seq) 298 ); 299 300 /* Reserving/releasing blocks in the log */ 301 TRACE_EVENT(gfs2_log_blocks, 302 303 TP_PROTO(const struct gfs2_sbd *sdp, int blocks), 304 305 TP_ARGS(sdp, blocks), 306 307 TP_STRUCT__entry( 308 __field( dev_t, dev ) 309 __field( int, blocks ) 310 ), 311 312 TP_fast_assign( 313 __entry->dev = sdp->sd_vfs->s_dev; 314 __entry->blocks = blocks; 315 ), 316 317 TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev), 318 MINOR(__entry->dev), __entry->blocks) 319 ); 320 321 /* Section 3 - bmap 322 * 323 * Objectives: 324 * Latency: Bmap request time 325 * Performance: Block allocator tracing 326 * Correctness: Test of disard generation vs. blocks allocated 327 */ 328 329 /* Map an extent of blocks, possibly a new allocation */ 330 TRACE_EVENT(gfs2_bmap, 331 332 TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh, 333 sector_t lblock, int create, int errno), 334 335 TP_ARGS(ip, bh, lblock, create, errno), 336 337 TP_STRUCT__entry( 338 __field( dev_t, dev ) 339 __field( sector_t, lblock ) 340 __field( sector_t, pblock ) 341 __field( u64, inum ) 342 __field( unsigned long, state ) 343 __field( u32, len ) 344 __field( int, create ) 345 __field( int, errno ) 346 ), 347 348 TP_fast_assign( 349 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev; 350 __entry->lblock = lblock; 351 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0; 352 __entry->inum = ip->i_no_addr; 353 __entry->state = bh->b_state; 354 __entry->len = bh->b_size; 355 __entry->create = create; 356 __entry->errno = errno; 357 ), 358 359 TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d", 360 MAJOR(__entry->dev), MINOR(__entry->dev), 361 (unsigned long long)__entry->inum, 362 (unsigned long long)__entry->lblock, 363 (unsigned long)__entry->len, 364 (unsigned long long)__entry->pblock, 365 __entry->state, __entry->create ? "create " : "nocreate", 366 __entry->errno) 367 ); 368 369 /* Keep track of blocks as they are allocated/freed */ 370 TRACE_EVENT(gfs2_block_alloc, 371 372 TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len, 373 u8 block_state), 374 375 TP_ARGS(ip, block, len, block_state), 376 377 TP_STRUCT__entry( 378 __field( dev_t, dev ) 379 __field( u64, start ) 380 __field( u64, inum ) 381 __field( u32, len ) 382 __field( u8, block_state ) 383 ), 384 385 TP_fast_assign( 386 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev; 387 __entry->start = block; 388 __entry->inum = ip->i_no_addr; 389 __entry->len = len; 390 __entry->block_state = block_state; 391 ), 392 393 TP_printk("%u,%u bmap %llu alloc %llu/%lu %s", 394 MAJOR(__entry->dev), MINOR(__entry->dev), 395 (unsigned long long)__entry->inum, 396 (unsigned long long)__entry->start, 397 (unsigned long)__entry->len, 398 block_state_name(__entry->block_state)) 399 ); 400 401 #endif /* _TRACE_GFS2_H */ 402 403 /* This part must be outside protection */ 404 #undef TRACE_INCLUDE_PATH 405 #define TRACE_INCLUDE_PATH . 406 #define TRACE_INCLUDE_FILE trace_gfs2 407 #include <trace/define_trace.h> 408 409