/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
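/*
 * Worked example (illustrative): with 4KB pages PAGE_CACHE_SHIFT is 12,
 * so MIN_WRITEBACK_PAGES = 4096UL >> (12 - 10) = 1024 pages, i.e. 4MB.
 * With 64KB pages (PAGE_CACHE_SHIFT == 16) it is 4096 >> 6 = 64 pages,
 * still 4MB worth of data.
 */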
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (sb_is_blkdev_sb(sb))
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(BDI_registered, &bdi->state))
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	spin_unlock_bh(&bdi->wb_lock);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	if (!test_bit(BDI_registered, &bdi->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &bdi->work_list);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		bdi_wakeup_thread(bdi);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			 enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	bdi_wakeup_thread(bdi);
}
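/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that wants to opportunistically flush roughly 1024 pages from a
 * device could do something like
 *
 *	if (bdi_has_dirty_io(bdi))
 *		bdi_start_writeback(bdi, 1024, WB_REASON_TRY_TO_FREE_PAGES);
 *
 * The work item is merely queued; the flusher thread performs the IO
 * asynchronously and no completion is guaranteed.
 */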
/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * Requeue an inode for re-scanning after the bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
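/*
 * Illustrative numbers for the 32-bit guard above: jiffies wraps roughly
 * every 49.7 days at HZ=1000. If an inode was dirtied just before a wrap,
 * a plain time_after(dirtied_when, t) can keep reporting the inode as
 * "dirtied in the future" long after the fact; the extra
 * time_before_eq(dirtied_when, jiffies) check rejects such wrapped,
 * stale timestamps.
 */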
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed at callers not holding any inode reference,
 * so once i_lock is dropped, the inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}
/*
 * Find the proper writeback list for the inode, depending on its current
 * state and possibly also a change of its state while we were doing
 * writeback.  Here we handle things such as livelock prevention or fairness
 * of writeback among inodes.  This function can only be called by the
 * flusher thread - no one else processes all inodes in writeback lists and
 * requeueing inodes behind the flusher thread's back can have unexpected
 * consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has
	 * a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, so clear the dirty metadata flags right before
	 * write_inode().
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
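/*
 * Note on the flag arithmetic above (assuming the usual <linux/fs.h>
 * definitions of this era): I_DIRTY is the union
 * I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES, so the saved "dirty"
 * mask tells us whether the inode itself (as opposed to just its pages)
 * still needs a trip through write_inode().
 */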
/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is meant for writing back a single inode, e.g. on behalf of
 * the filesystem.  The flusher thread uses __writeback_single_inode() instead
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers
		 * hold an inode reference or the inode has I_WILL_FREE set,
		 * it cannot go away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip the inode if it is clean and we have no outstanding writeback
	 * in WB_SYNC_ALL mode. We don't want to mess with writeback lists in
	 * this function since the flusher thread may be doing, for example, a
	 * sync in parallel, and if we move the inode it could get skipped. So
	 * here we make sure the inode is on some writeback list and leave it
	 * there unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If the inode is clean, remove it from the writeback lists.
	 * Otherwise don't touch it. See the comment above for an explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
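/*
 * Worked example with made-up numbers: with 4KB pages,
 * MIN_WRITEBACK_PAGES is 1024, so a computed chunk of pages = 500
 * becomes round_down(500 + 1024, 1024) = 1024, and pages = 1500 becomes
 * round_down(1500 + 1024, 1024) = 2048. The rounding guarantees at
 * least one 4MB chunk and a whole number of 4MB units.
 */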
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we have completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * Bail out to wb_writeback() often enough to check the
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busily retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}
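/*
 * Illustrative numbers (assuming default tunables, not taken from this
 * file): with dirty_background_ratio = 10 and ~2GB of dirtyable memory,
 * global_dirty_limits() yields a background_thresh of roughly 52k pages
 * (~200MB with 4KB pages). Once the global counters, or this bdi's
 * share of them, exceed that, background writeback keeps running.
 */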
/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
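/*
 * Unit note with the usual defaults: dirty_writeback_interval and
 * dirty_expire_interval are in centisecs, hence the "* 10" conversions
 * to msecs above and in wb_writeback(). The defaults of 500 and 3000
 * centisecs mean the periodic flush runs every 5 seconds and targets
 * inodes dirtied more than 30 seconds ago.
 */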
/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(BDI_registered, &bdi->state))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		bdi_wakeup_thread_delayed(bdi);

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
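/*
 * Typical caller pattern (sketch): filesystems rarely call this
 * directly. After updating in-core inode fields they use the wrappers
 * from <linux/fs.h>, e.g.
 *
 *	inode->i_mtime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 *
 * where mark_inode_dirty() expands to __mark_inode_dirty(inode, I_DIRTY)
 * and mark_inode_dirty_sync() to __mark_inode_dirty(inode, I_DIRTY_SYNC).
 */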
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
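/*
 * Usage sketch (illustrative): a filesystem that wants to nudge dirty
 * data out before reporting ENOSPC might, with s_umount held, call
 *
 *	writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *
 * or reach for the try_to_* variants below when it cannot block on
 * s_umount.
 */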
/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr() if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by a call to try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
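/*
 * Usage sketch (illustrative): an fsync implementation that has already
 * written the data pages but also needs the raw inode pushed out, e.g.
 * because i_size changed, could finish with
 *
 *	err = sync_inode_metadata(inode, 1);
 *
 * With wait == 0 the writeout is only started, not waited upon.
 */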