#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON						\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

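/*
 * Illustrative note (not consumed by the build): the first EM()/EMe()
 * pass above emits TRACE_DEFINE_ENUM() for each WB_REASON_* value so
 * that user-space tools can resolve the enum names.  After the
 * redefinition, expanding WB_WORK_REASON yields roughly
 *
 *	{ WB_REASON_BACKGROUND,		"background" },
 *	...
 *	{ WB_REASON_FORKER_THREAD,	"forker_thread" }
 *
 * which is the {value, string} table that __print_symbolic() consumes
 * in the TP_printk() callbacks below.
 */
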
struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);

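/*
 * Illustrative usage sketch (assumes tracefs is mounted at
 * /sys/kernel/debug/tracing or /sys/kernel/tracing; run from that
 * directory):
 *
 *	# echo 1 > events/writeback/writeback_dirty_page/enable
 *	# cat trace_pipe
 *
 * Each record is rendered through the TP_printk() format above, e.g. a
 * hypothetical line ending in "bdi 8:0: ino=1234 index=0".
 */
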
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d",
		__entry->name,
		__entry->ino,
		__entry->sync_mode
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
	TP_ARGS(bdi, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
	),
	TP_fast_assign(
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
	),

	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older = older_than_this ? *older_than_this : 0;
		__entry->age = older_than_this ?
				(jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved = moved;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_page_state(NR_DIRTIED);
		__entry->nr_written = global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

#define KBps(x)			((x) << (PAGE_SHIFT - 10))
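/*
 * KBps() converts a page count (or pages/second rate) to kilobytes:
 * shifting by PAGE_SHIFT - 10 multiplies by PAGE_SIZE / 1024, so with
 * 4KiB pages KBps(x) == 4 * x.
 */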

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw = KBps(bdi->write_bandwidth);
		__entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);

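/*
 * Descriptive note for the event below: setpoint is computed in
 * TP_fast_assign() as the midpoint between global_dirty_limit and the
 * freerun mark (thresh + bg_thresh) / 2, and the paused/pause/period/
 * think fields are converted from jiffies to milliseconds before being
 * recorded.
 */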
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
		__field(unsigned long, period)
		__field(long, think)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit = global_dirty_limit;
		__entry->setpoint = (global_dirty_limit + freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->think = current->dirty_paused_when == 0 ? 0 :
			(long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period = period * 1000 / HZ;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
	),

	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	)
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
		  __entry->usec_timeout,
		  __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

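/*
 * In the class below, age is reported in seconds since dirtied_when,
 * and wrote is nr_to_write minus the budget still left in the wbc,
 * i.e. roughly the number of pages written during this pass.
 */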
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_lazytime_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(__u16, mode)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->mode = inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>