/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  i.e. data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate:1;
	int range_cyclic:1;
	int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	/*
	 * work can have disappeared at this point.  The bit waitqueue
	 * functions should be able to tolerate this, provided bdi_sched_wait
	 * does not dereference its pointer argument.
	 */
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}
static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * so drop our reference.  If this is the last ref, delete and
	 * free it.
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list.
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it.  When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items.  The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback.
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}
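/*
 * Illustrative sketch (not in the original source): the two submission
 * paths side by side.  WB_SYNC_NONE callers go through
 * bdi_alloc_queue_work() above and never wait, so the work item is
 * kmalloc'ed and later freed from an RCU callback once every thread has
 * dropped its reference.  WB_SYNC_ALL callers may instead keep the work
 * on their stack, but must then block until the flusher thread has acked
 * it, because the stack frame has to outlive the queue entry:
 *
 *	struct bdi_work work;
 *
 *	bdi_work_init(&work, &args);
 *	work.state |= WS_ONSTACK;
 *	bdi_queue_work(bdi, &work);
 *	bdi_wait_on_work_clear(&work);
 *
 * This is exactly the sequence bdi_sync_writeback() below uses.
 */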
/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete.  Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback.  The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion.  The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
			 long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	/*
	 * We treat @nr_pages=0 as the special case to do background writeback,
	 * i.e. to sync pages until the background dirty threshold is reached.
	 */
	if (!nr_pages) {
		args.nr_pages = LONG_MAX;
		args.for_background = 1;
	}

	bdi_alloc_queue_work(bdi, &args);
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * Requeue an inode for re-scanning after the bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 * the clearing of I_SYNC must be visible before we wake any waiters.
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
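/*
 * Worked example (illustrative, not in the original source): on a 32-bit
 * box with HZ=1000, jiffies wraps roughly every 49.7 days.  Suppose an
 * inode was stamped just before a wrap at dirtied_when = 0xfffff000 and
 * is redirtied so often that the stamp never gets refreshed.  Once jiffies
 * has wrapped to a small value, time_after(dirtied_when, t) sees the stale
 * stamp as "in the future" and would skip the inode forever.  The extra
 * time_before_eq(dirtied_when, jiffies) term rejects such stamps, since a
 * legitimate dirtied_when can never be later than the current jiffies.
 */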
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
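/*
 * Worked example (illustrative, not in the original source): for
 * kupdate-style writeback, wb_writeback() below computes the cutoff as
 *
 *	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
 *
 * dirty_expire_interval is in centisecs, so with the default of 3000 the
 * cutoff is 30 seconds ago.  move_expired_inodes() scans b_dirty from the
 * tail (the eldest entries) and stops at the first inode for which
 * inode_dirtied_after(inode, oldest_jif) is true, i.e. the first one that
 * has been dirty for less than 30 seconds.
 */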
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via a syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode once we
		 * have completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages got dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get
				 * some writeout.  Otherwise heavy writing to
				 * one file would indefinitely suspend writeout
				 * of all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
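/*
 * Illustrative sketch (not in the original source): the calling convention
 * for writeback_single_inode().  The caller must hold inode_lock and either
 * a reference on the inode or I_WILL_FREE; the function itself drops and
 * retakes inode_lock around the actual IO:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *	};
 *
 *	spin_lock(&inode_lock);
 *	err = writeback_single_inode(inode, &wbc);
 *	spin_unlock(&inode_lock);
 *
 * This is essentially what write_inode_now() and sync_inode() at the bottom
 * of this file do.
 */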
static void unpin_sb_for_writeback(struct super_block **psb)
{
	struct super_block *sb = *psb;

	if (sb) {
		up_read(&sb->s_umount);
		put_super(sb);
		*psb = NULL;
	}
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback.  So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode, struct super_block **psb)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * If this sb is already pinned, nothing more to do.  If not and
	 * *psb is non-NULL, unpin the old one first.
	 */
	if (sb == *psb)
		return 0;
	else if (*psb)
		unpin_sb_for_writeback(psb);

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			goto pinned;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
pinned:
	*psb = sb;
	return 0;
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb, *pin_sb = NULL;
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		/*
		 * Was this inode dirtied after this scan of b_io started?
		 * This keeps sync from doing extra jobs and from livelocking.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	unpin_sb_for_writeback(&pin_sb);

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to write out in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}
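/*
 * Worked example (illustrative, not in the original source): with a
 * vm.dirty_background_ratio of 10 (a common default) and, say, 2 GiB of
 * dirtyable memory (~524288 4KiB pages), get_dirty_limits() yields a
 * background_thresh of roughly 52000 pages.  Background writeback started
 * via bdi_start_writeback(bdi, NULL, 0) therefore keeps writing until
 * NR_FILE_DIRTY + NR_UNSTABLE_NFS drops below that figure, rather than
 * until some fixed page count has been written.
 */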
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark
 * the dirtying-time in the inode's address_space.  So this periodic writeback
 * code just walks the superblock inode list, writing back any inodes which
 * are older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a one-second
 * gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write
 * back all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something?  Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written.  Wait for some inode to become available
		 * for writeback.  Otherwise we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
					   struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
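/*
 * Worked example (illustrative, not in the original source): suppose
 * args->nr_pages is 4096 and each pass through the loop manages to write a
 * full chunk.  Each writeback_inodes_wb() call starts with nr_to_write =
 * MAX_WRITEBACK_PAGES (1024) and drives it to 0, so nr_pages drops by 1024
 * and wrote grows by 1024 per iteration; the loop ends after four passes.
 * If a pass writes only, say, 300 pages (nr_to_write left at 724) and
 * more_io is set, the "try for more" test restarts the loop; if nothing at
 * all was written, we instead block on the eldest b_more_io inode to avoid
 * busylooping.
 */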
/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet.  ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work it
 * clears its bit.  Depending on the writeback type, the thread will notify
 * completion either on receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi.  Also
 * wakes up periodically and does kupdated-style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate.  If
			 * we see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		schedule_timeout_interruptible(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}
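/*
 * Worked example (illustrative, not in the original source):
 * dirty_writeback_interval is in centisecs, so the default of 500 gives
 * wait_jiffies = msecs_to_jiffies(5000), i.e. the flusher wakes every five
 * seconds.  max_idle = max(5 * 60 * HZ, wait_jiffies) is then five minutes:
 * a flusher thread that has written nothing for five minutes exits, and the
 * default bdi's thread recreates it when dirty data shows up again.
 */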
/*
 * Schedule writeback for all backing devices.  This does WB_SYNC_NONE
 * writeback; for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
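/*
 * Illustrative sketch (not in the original source): with the block_dump
 * sysctl enabled, the printk above produces lines of the form
 *
 *	bash(2314): dirtied inode 131095 (.bash_history) on sda1
 *
 * where the fields come straight from the format string: task comm and
 * pid, inode number, a dentry name if one could be found, and the sb id.
 */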
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL!  We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time
 * of the block-special inode (/dev/hda1) itself.  And the ->dirtied_when
 * field of the kernel-internal blockdev inode represents the dirtying time
 * of the blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
				       bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
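/*
 * Illustrative sketch (not in the original source): filesystems normally
 * reach __mark_inode_dirty() through the inline wrappers in
 * include/linux/fs.h rather than calling it directly:
 *
 *	mark_inode_dirty(inode);	- __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	- __mark_inode_dirty(inode, I_DIRTY_SYNC)
 *
 * while set_page_dirty() paths pass I_DIRTY_PAGES via page->mapping->host,
 * as described in the comment above.
 */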
/*
 * Wait for writeback on all inodes of a superblock to complete.  This is
 * the "wait" half of a data integrity sync; the writeout itself is started
 * separately (see sync_inodes_sb() below).
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync.  Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock.  So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block.  No guarantees are
 * made on how many (if any) will be written, and this function does not
 * wait for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		writeback_inodes_sb(sb);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	bdi_sync_writeback(sb->s_bdi, sb);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
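/*
 * Illustrative sketch (not in the original source): how the sb-level entry
 * points above are typically used.  A data integrity sync writes and waits,
 * holding s_umount as bdi_sync_writeback() requires:
 *
 *	down_read(&sb->s_umount);
 *	sync_inodes_sb(sb);		- WB_SYNC_ALL write, then wait
 *	up_read(&sb->s_umount);
 *
 * whereas a filesystem that merely wants to nudge dirty data out (for
 * example when it is running short of space) can use the non-blocking form:
 *
 *	writeback_inodes_sb_if_idle(sb);	- WB_SYNC_NONE, no waiting
 */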
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
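/*
 * Illustrative sketch (not in the original source): a typical fsync-path
 * use of sync_inode().  With nr_to_write = 0 only the inode itself gets
 * written; LONG_MAX writes the data pages as well:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *	};
 *
 *	err = sync_inode(inode, &wbc);
 *
 * This mirrors the writeback_control that write_inode_now() above builds
 * internally, except that the caller controls the fields directly.
 */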