--- page-writeback.c (36715cef0770b7e2547892b7c3197fc024274630)
+++ page-writeback.c (d46db3d58233be4be980eb1e42eebe7808bcabab)
 /*
  * mm/page-writeback.c
  *
  * Copyright (C) 2002, Linus Torvalds.
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * Contains functions related to writing back dirty pages at the
  * address_space level.
--- 477 unchanged lines hidden ---
         unsigned long dirty_thresh;
         unsigned long bdi_thresh;
         unsigned long pages_written = 0;
         unsigned long pause = 1;
         bool dirty_exceeded = false;
         struct backing_dev_info *bdi = mapping->backing_dev_info;
 
         for (;;) {
-                struct writeback_control wbc = {
-                        .sync_mode = WB_SYNC_NONE,
-                        .older_than_this = NULL,
-                        .nr_to_write = write_chunk,
-                        .range_cyclic = 1,
-                };
-
                 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                         global_page_state(NR_UNSTABLE_NFS);
                 nr_writeback = global_page_state(NR_WRITEBACK);
 
                 global_dirty_limits(&background_thresh, &dirty_thresh);
 
                 /*
                  * Throttle it only when the background writeback cannot
--- 45 unchanged lines hidden ---
                  * Unstable writes are a feature of certain networked
                  * filesystems (i.e. NFS) in which data may have been
                  * written to the server's write cache, but has not yet
                  * been flushed to permanent storage.
                  * Only move pages to writeback if this bdi is over its
                  * threshold otherwise wait until the disk writes catch
                  * up.
                  */
-                trace_wbc_balance_dirty_start(&wbc, bdi);
+                trace_balance_dirty_start(bdi);
                 if (bdi_nr_reclaimable > bdi_thresh) {
-                        writeback_inodes_wb(&bdi->wb, &wbc);
-                        pages_written += write_chunk - wbc.nr_to_write;
-                        trace_wbc_balance_dirty_written(&wbc, bdi);
+                        pages_written += writeback_inodes_wb(&bdi->wb,
+                                                             write_chunk);
+                        trace_balance_dirty_written(bdi, pages_written);
                         if (pages_written >= write_chunk)
                                 break;          /* We've done our duty */
                 }
-                trace_wbc_balance_dirty_wait(&wbc, bdi);
                 __set_current_state(TASK_UNINTERRUPTIBLE);
                 io_schedule_timeout(pause);
+                trace_balance_dirty_wait(bdi);
 
                 /*
                  * Increase the delay for each loop, up to our previous
                  * default of taking a 100ms nap.
                  */
                 pause <<= 1;
                 if (pause > HZ / 10)
                         pause = HZ / 10;
--- 840 unchanged lines hidden ---
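
The hunk above is a calling-convention change: the old code built a throwaway `struct writeback_control` per iteration, let `writeback_inodes_wb()` consume its `nr_to_write` budget, and recovered the page count as `write_chunk - wbc.nr_to_write`; the new code passes the page budget directly and takes the number of pages written from the return value. The userspace sketch below only models that shape; `fake_wb_control` and `write_some_pages()` are invented stand-ins for illustration, not kernel APIs.

```c
/*
 * Userspace sketch (not kernel code) of the two calling conventions visible
 * in the diff. Only the shape of the old vs. new writeback_inodes_wb()
 * usage is taken from the hunk; everything else here is a stand-in.
 */
#include <stdio.h>

struct fake_wb_control {          /* stands in for struct writeback_control */
	long nr_to_write;         /* budget; callee decrements as it writes */
};

/* pretend to write up to 'budget' pages; returns how many were written */
static long write_some_pages(long budget)
{
	return budget > 300 ? 300 : budget;   /* arbitrary demo value */
}

/* old convention: callee consumes wbc.nr_to_write, caller computes the delta */
static void old_style(long write_chunk)
{
	struct fake_wb_control wbc = { .nr_to_write = write_chunk };
	long written;

	wbc.nr_to_write -= write_some_pages(wbc.nr_to_write);
	written = write_chunk - wbc.nr_to_write;
	printf("old style: %ld pages written\n", written);
}

/* new convention: callee takes a plain page count and returns pages written */
static void new_style(long write_chunk)
{
	long written = write_some_pages(write_chunk);
	printf("new style: %ld pages written\n", written);
}

int main(void)
{
	old_style(1024);
	new_style(1024);
	return 0;
}
```

Returning the count also lines up with the tracepoint changes in the same hunk: `trace_balance_dirty_written()` takes the bdi and the running `pages_written` total directly, so no `wbc` has to be threaded through the trace calls.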
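The tail of the loop, shown in the final context lines, doubles `pause` on every iteration and clamps it at `HZ / 10`, i.e. the 100ms nap the comment mentions. Below is a minimal standalone sketch of that backoff; fixing `HZ` at 1000 is an assumption made purely for the printout, since the real value is a kernel configuration choice.

```c
/*
 * Standalone illustration of the pause backoff at the bottom of the loop:
 * sleep 'pause' jiffies, then double it for the next iteration, clamped at
 * HZ / 10 (100ms). HZ = 1000 is assumed here only for the demo output.
 */
#include <stdio.h>

#define HZ 1000   /* assumption for the demo; real kernels use 100..1000 */

int main(void)
{
	unsigned long pause = 1;
	int iter;

	for (iter = 1; iter <= 10; iter++) {
		printf("iteration %2d: sleep %lu jiffies (%lu ms)\n",
		       iter, pause, pause * 1000 / HZ);

		/* same arithmetic as the kernel loop: double, clamp at HZ/10 */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}
	return 0;
}
```

Starting from 1 jiffy, the sleep grows 1, 2, 4, ... jiffies until it saturates at 100ms, which bounds how long a dirtying task can be put to sleep in any single pass through the loop.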