/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"


/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info
 * unless they implement their own, which is somewhat inefficient, as it may
 * prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL!  We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, task_pid_nr(current),
			       inode->i_ino, name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
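
/*
 * Illustrative sketch (not part of the original file): the "hash before
 * you dirty" rule above, as a hypothetical filesystem's inode-creation
 * path might follow it.  "myfs_new_inode" is a made-up name; only
 * new_inode(), insert_inode_hash() and mark_inode_dirty() are real kernel
 * APIs of this era.
 */
#if 0	/* example only, not compiled */
static struct inode *myfs_new_inode(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = ino;
	/*
	 * Hash the inode first: an unhashed, non-blockdev inode would be
	 * marked dirty but never placed on sb->s_dirty (see above).
	 */
	insert_inode_hash(inode);
	mark_inode_dirty(inode);	/* now it lands on sb->s_dirty */
	return inode;
}
#endif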

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (!time_after_eq(inode->dirtied_when,
				   tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * requeue inode for re-scanning after sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    time_after(inode->dirtied_when, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
		     unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}

int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);
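
/*
 * Illustrative sketch (not part of the original file): how a kupdate-style
 * caller might build the older_than_this cutoff that queue_io() and
 * move_expired_inodes() test against.  dirty_expire_interval (centisecs in
 * kernels of this era) and the chunk size of 1024 pages are assumptions
 * for illustration, not values taken from this file.
 */
#if 0	/* example only, not compiled */
static void example_kupdate_pass(void)
{
	unsigned long oldest_jif;
	struct writeback_control wbc = {
		.bdi		 = NULL,		/* all queues */
		.sync_mode	 = WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.for_kupdate	 = 1,
		.nr_to_write	 = 1024,		/* one modest chunk */
	};

	/* only inodes dirtied before this instant count as expired */
	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
	writeback_inodes(&wbc);
}
#endif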

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_SYNC);
	WARN_ON(inode->i_state & I_NEW);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	WARN_ON(inode->i_state & I_NEW);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode and move it from s_io onto
			 * s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of s_io.
		 */
		requeue_io(inode);
		return 0;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_SYNC) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

		wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
				      TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_SYNC);
	}
	return __sync_single_inode(inode, wbc);
}
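
/*
 * For reference, a sketch (not part of the original file) of the wait
 * action used with __wait_on_bit() above.  It pairs with
 * inode_sync_complete(): the waker clears I_SYNC, issues the barrier, then
 * wake_up_bit() re-runs the bit test for every sleeper.  The real
 * inode_wait() lives elsewhere (fs/inode.c in kernels of this era); this
 * copy is an illustration, not a verbatim import.
 */
#if 0	/* example only, not compiled */
static int example_inode_wait(void *word)
{
	/* sleep until wake_up_bit(&inode->i_state, __I_SYNC) retests */
	schedule();
	return 0;
}
#endif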

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
void generic_sync_sb_inodes(struct super_block *sb,
			    struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */
	int sync = wbc->sync_mode == WB_SYNC_ALL;

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						 struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & I_NEW) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;	/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;	/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}

	if (sync) {
		struct inode *inode, *old_inode = NULL;

		/*
		 * Data integrity sync.  Must wait for all pages under
		 * writeback, because there may have been pages dirtied
		 * before our sync call whose writeout started before we
		 * wrote them out.  In that case the inode may not be on
		 * the dirty list, but we still have to wait for that
		 * writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from s_inodes list while we dropped the
			 * inode_lock.  We cannot iput the inode now as we can
			 * be holding the last reference and we cannot iput it
			 * under inode_lock.  So we keep the reference and iput
			 * it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	} else
		spin_unlock(&inode_lock);

	return;		/* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
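
/*
 * Illustrative sketch (not part of the original file): a minimal
 * writeback_control for a non-blocking, best-effort pass over one
 * superblock, of the kind a throttling caller might use.  The field
 * choices (1024-page chunk, no older_than_this cutoff, HZ/10 backoff) are
 * assumptions for illustration, not values taken from this file.
 */
#if 0	/* example only, not compiled */
static void example_besteffort_pass(struct super_block *sb)
{
	struct writeback_control wbc = {
		.bdi		 = NULL,		/* any queue */
		.sync_mode	 = WB_SYNC_NONE,	/* don't wait */
		.older_than_this = NULL,		/* any age */
		.nonblocking	 = 1,		/* back off if congested */
		.nr_to_write	 = 1024,	/* one modest chunk */
	};

	generic_sync_sb_inodes(sb, &wbc);
	if (wbc.encountered_congestion)
		congestion_wait(WRITE, HZ / 10);	/* simple backoff */
}
#endif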

static void sync_sb_inodes(struct super_block *sb,
			   struct writeback_control *wbc)
{
	generic_sync_sb_inodes(sb, wbc);
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has
 * a non-empty ->s_dirty it hasn't been killed yet and kill_super() won't
 * proceed past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists
 * are all empty.  Since __sync_single_inode() regains inode_lock before it
 * finally moves the inode from the superblock lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev
 * superblock, sync_sb_inodes will seek out the blockdev which matches `bdi'.
 * Maybe not super-efficient, but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!wait) {
		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

		wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	} else
		wbc.nr_to_write = LONG_MAX;	/* doesn't actually matter */

	sync_sb_inodes(sb, &wbc);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	__sync_inodes(0);

	if (wait)
		__sync_inodes(1);
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write	= LONG_MAX,
		.sync_mode	= sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
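
/*
 * Illustrative sketch (not part of the original file): the sort of
 * fsync-path caller write_inode_now() is meant for.  "example_fsync" is a
 * made-up ->fsync-style helper; only write_inode_now() itself is an API
 * from this file.
 */
#if 0	/* example only, not compiled */
static int example_fsync(struct file *file, struct dentry *dentry,
			 int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* flush dirty pages and the inode itself, waiting for completion */
	return write_inode_now(inode, 1);
}
#endif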

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
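
/*
 * Illustrative sketch (not part of the original file): how an O_SYNC write
 * path might use generic_osync_inode() after dirtying pagecache.  The
 * OSYNC_METADATA|OSYNC_DATA mask mirrors what generic file write paths of
 * this era passed; "example_osync_after_write" is a made-up helper.
 */
#if 0	/* example only, not compiled */
static int example_osync_after_write(struct file *file)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	if (!((file->f_flags & O_SYNC) || IS_SYNC(inode)))
		return 0;	/* nothing to do for plain async writes */

	/* write and wait upon data and the buffers on private_list */
	return generic_osync_inode(inode, mapping,
				   OSYNC_METADATA | OSYNC_DATA);
}
#endif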