#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state) \
	__print_flags(state, "|", \
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"}, \
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"}, \
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"}, \
		{I_NEW,			"I_NEW"}, \
		{I_WILL_FREE,		"I_WILL_FREE"}, \
		{I_FREEING,		"I_FREEING"}, \
		{I_CLEAR,		"I_CLEAR"}, \
		{I_SYNC,		"I_SYNC"}, \
		{I_REFERENCED,		"I_REFERENCED"} \
	)

#define WB_WORK_REASON \
		{WB_REASON_BACKGROUND,		"background"}, \
		{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"}, \
		{WB_REASON_SYNC,		"sync"}, \
		{WB_REASON_PERIODIC,		"periodic"}, \
		{WB_REASON_LAPTOP_TIMER,	"laptop_timer"}, \
		{WB_REASON_FREE_MORE_MEM,	"free_more_memory"}, \
		{WB_REASON_FS_FREE_SPACE,	"fs_free_space"}, \
		{WB_REASON_FORKER_THREAD,	"forker_thread"}

struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino = inode->i_ino;
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d",
		__entry->name,
		__entry->ino,
		__entry->sync_mode
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
	TP_ARGS(bdi, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
	),
	TP_fast_assign(
		struct device *dev = bdi->dev;
		if (!dev)
			dev = default_backing_dev_info.dev;
		strncpy(__entry->name, dev_name(dev), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		  __entry->name
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
	),

	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
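/*
 * Note: wbc_writepage is the only wbc_class user here; it is emitted from
 * the generic writeback loop (write_cache_pages() and similar paths) just
 * before a page is handed to ->writepage(), snapshotting the
 * writeback_control state at that point.
 */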
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older = older_than_this ? *older_than_this : 0;
		__entry->age = older_than_this ?
				(jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved = moved;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_page_state(NR_DIRTIED);
		__entry->nr_written = global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw = KBps(bdi->write_bandwidth);
		__entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit,	/* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);

TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
		__field(unsigned long, period)
		__field(long, think)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit = global_dirty_limit;
		__entry->setpoint = (global_dirty_limit + freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->think = current->dirty_paused_when == 0 ? 0 :
			(long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period = period * 1000 / HZ;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	)
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
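/*
 * Usage sketch (assumes tracefs/debugfs mounted at the usual location):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/writeback/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 * As with other trace event headers, the event bodies are instantiated by
 * the compilation unit that defines CREATE_TRACE_POINTS before including
 * this file (typically fs/fs-writeback.c for the writeback events).
 */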