/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;
	struct list_head wait_list;
	struct rcu_head rcu_head;

	unsigned long seen;
	atomic_t pending;

	struct super_block *sb;
	unsigned long nr_pages;
	enum writeback_sync_modes sync_mode;

	unsigned long state;
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct writeback_control *wbc)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->sb = wbc->sb;
	work->nr_pages = wbc->nr_to_write;
	work->sync_mode = wbc->sync_mode;
	work->state = WS_USED;
}
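/*
 * Example (illustrative sketch added for exposition; the single-thread
 * sequence shown is an assumption): a bdi_work item moves through a small
 * lifetime protocol built on the WS_* bits above:
 *
 *	bdi_work_init(work, wbc);	// sets WS_USED
 *	bdi_queue_work(bdi, work);	// publishes the item via RCU
 *	// ...a wb thread picks it up and runs it...
 *	wb_clear_pending(wb, work);	// last ref: unlink from work_list
 *	bdi_work_clear(work);		// clears WS_USED, wakes waiters
 *
 * On-stack items additionally carry WS_ONSTACK so the completion path
 * knows to defer the wakeup past the RCU grace period instead of
 * calling kfree().
 */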
static inline void bdi_work_init_on_stack(struct bdi_work *work,
					  struct writeback_control *wbc)
{
	bdi_work_init(work, wbc);
	work->state |= WS_ONSTACK;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->sync_mode;

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!bdi_work_on_stack(work))
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
		call_rcu(&work->rcu_head, bdi_work_free);
}
static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	if (work) {
		work->seen = bdi->wb_mask;
		BUG_ON(!work->seen);
		atomic_set(&work->pending, bdi->wb_cnt);
		BUG_ON(!bdi->wb_cnt);

		/*
		 * Make sure stores are seen before it appears on the list
		 */
		smp_mb();

		spin_lock(&bdi->wb_lock);
		list_add_tail_rcu(&work->list, &bdi->work_list);
		spin_unlock(&bdi->wb_lock);
	}

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		/*
		 * If we failed allocating the bdi work item, wake up the wb
		 * thread always. As a safety precaution, it'll flush out
		 * everything
		 */
		if (!wb_has_dirty_io(wb)) {
			if (work)
				wb_clear_pending(wb, work);
		} else if (wb->task)
			wake_up_process(wb->task);
	}
}
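/*
 * Note (interpretation, not from the original file): the smp_mb() in
 * bdi_queue_work() orders the ->seen and ->pending stores before the
 * RCU list insertion, so a wb thread that finds the work on ->work_list
 * in get_next_work_item() also sees a fully initialized item.
 */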
/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
{
	struct bdi_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work)
		bdi_work_init(work, wbc);

	return work;
}

void bdi_start_writeback(struct writeback_control *wbc)
{
	const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
	struct bdi_work work_stack, *work = NULL;

	if (!must_wait)
		work = bdi_alloc_work(wbc);

	if (!work) {
		work = &work_stack;
		bdi_work_init_on_stack(work, wbc);
	}

	bdi_queue_work(wbc->bdi, work);

	/*
	 * If the sync mode is WB_SYNC_ALL, block waiting for the work to
	 * complete. If not, we only need to wait for the work to be started,
	 * if we allocated it on-stack. We use the same mechanism, if the
	 * wait bit is set in the bdi_work struct, then threads will not
	 * clear pending until after they are done.
	 *
	 * Note that work == &work_stack if must_wait is true, so we don't
	 * need to do call_rcu() here ever, since the completion path will
	 * have done that for us.
	 */
	if (must_wait || work == &work_stack) {
		bdi_wait_on_work_clear(work);
		if (work != &work_stack)
			call_rcu(&work->rcu_head, bdi_work_free);
	}
}
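/*
 * Example (illustrative sketch; the caller and all values are assumptions,
 * not taken from this file): a throttling path that wants some pages
 * cleaned on a given backing device could do:
 *
 *	struct writeback_control wbc = {
 *		.bdi		= bdi,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= 1024,
 *	};
 *
 *	bdi_start_writeback(&wbc);
 *
 * With WB_SYNC_NONE this returns once the work is queued (or, for the
 * on-stack fallback, once a wb thread has started on it); only
 * WB_SYNC_ALL blocks until the writeback has completed.
 */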
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole pdflush writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}
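/*
 * Worked example (illustrative; mirrors the computation done in
 * wb_writeback() below): older_than_this is an absolute jiffies cutoff.
 * For kupdate-style writeback it is derived from dirty_expire_interval,
 * which is kept in centiseconds:
 *
 *	unsigned long oldest_jif = jiffies -
 *			msecs_to_jiffies(dirty_expire_interval * 10);
 *
 * With a dirty_expire_interval of 3000 centisecs, only inodes first
 * dirtied more than 30 seconds ago are moved to the dispatch queue.
 */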
/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}
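/*
 * Note (interpretation, not from the original file): this is the classic
 * drop-lock/wait/retake pattern. inode_lock cannot be held across the
 * schedule in __wait_on_bit(), and because the lock is dropped, another
 * thread may set I_SYNC again before we reacquire it; hence the
 * surrounding do/while loop instead of a single wait.
 */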
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything. Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
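/*
 * Summary (added for exposition; restates the branches above): where an
 * inode lands after writeback_single_inode():
 *
 *	dirty pages remain, kupdate, slice exhausted -> requeue_io()   (b_more_io)
 *	dirty pages remain, kupdate, somehow blocked -> redirty_tail() (b_dirty)
 *	dirty pages remain, non-kupdate writeback    -> redirty_tail() (b_dirty)
 *	redirtied while under writeback              -> redirty_tail() (b_dirty)
 *	clean and in use                             -> inode_in_use
 *	clean and unused                             -> inode_unused
 */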
/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return 0;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
}

static void unpin_sb_for_writeback(struct writeback_control *wbc,
				   struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		return;

	up_read(&sb->s_umount);
	put_super(sb);
}
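/*
 * Example (illustrative; this is the pairing used by
 * writeback_inodes_wb() below):
 *
 *	if (pin_sb_for_writeback(wbc, inode)) {
 *		requeue_io(inode);	// couldn't pin, revisit later
 *		continue;
 *	}
 *	writeback_single_inode(inode, wbc);
 *	unpin_sb_for_writeback(wbc, inode);
 *
 * For WB_SYNC_ALL both helpers are no-ops beyond the sanity check,
 * since the caller is required to hold s_umount for the duration.
 */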
static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb;
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(wb->bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this. Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		/*
		 * Was this inode dirtied after this scan of the inode
		 * list started? This keeps sync from doing extra work
		 * and livelocking.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		unpin_sb_for_writeback(wbc, inode);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}
/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}
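/*
 * Worked example (illustrative; all numbers are made up):
 * get_dirty_limits() derives background_thresh from the
 * dirty_background_ratio sysctl. With roughly 1,000,000 dirtyable pages
 * and a 10% background ratio, background_thresh is about 100,000 pages,
 * so background writeback keeps looping while NR_FILE_DIRTY plus
 * NR_UNSTABLE_NFS stays at or above that count.
 */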
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
			 struct super_block *sb,
			 enum writeback_sync_modes sync_mode, int for_kupdate)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= sb,
		.sync_mode		= sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= for_kupdate,
		.range_cyclic		= 1,
	};
	unsigned long oldest_jif;
	long wrote = 0;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}

	for (;;) {
		/*
		 * Don't flush anything for non-integrity writeback where
		 * no nr_pages was given
		 */
		if (!for_kupdate && nr_pages <= 0 && sync_mode == WB_SYNC_NONE)
			break;

		/*
		 * If no specific pages were given and this is just a
		 * periodic background writeout and we are below the
		 * background dirty threshold, don't do anything
		 */
		if (for_kupdate && nr_pages <= 0 && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we ran out of stuff to write, bail unless more_io got set
		 */
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			if (wbc.more_io && !wbc.for_kupdate)
				continue;
			break;
		}
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_and_clear_bit(wb->nr, &work->seen))
			continue;

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages)
		return wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1);

	return 0;
}
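/*
 * Note on units (added for exposition): dirty_writeback_interval and
 * dirty_expire_interval back the dirty_writeback_centisecs and
 * dirty_expire_centisecs sysctls, so they are kept in centiseconds;
 * the "* 10" above converts to milliseconds for msecs_to_jiffies().
 * E.g. the default dirty_writeback_interval of 500 centisecs gives a
 * 5000 msec flush period.
 */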
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long nr_pages, wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		enum writeback_sync_modes sync_mode;

		nr_pages = work->nr_pages;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->sync_mode = sync_mode = WB_SYNC_ALL;
		else
			sync_mode = work->sync_mode;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, nr_pages, work->sb, sync_mode, 0);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}
/*
 * Schedule writeback for all backing devices. Expensive! If this is a data
 * integrity operation, writeback will be complete when this returns. If
 * we are simply called for WB_SYNC_NONE, then writeback will merely be
 * scheduled to run.
 */
static void bdi_writeback_all(struct writeback_control *wbc)
{
	const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
	struct backing_dev_info *bdi;
	struct bdi_work *work;
	LIST_HEAD(list);

restart:
	spin_lock(&bdi_lock);

	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		struct bdi_work *work;

		if (!bdi_has_dirty_io(bdi))
			continue;

		/*
		 * If work allocation fails, do the writes inline. We drop
		 * the lock and restart the list writeout. This should be OK,
		 * since this happens rarely and because the writeout should
		 * eventually make more free memory available.
		 */
		work = bdi_alloc_work(wbc);
		if (!work) {
			struct writeback_control __wbc;

			/*
			 * Not a data integrity writeout, just continue
			 */
			if (!must_wait)
				continue;

			spin_unlock(&bdi_lock);
			__wbc = *wbc;
			__wbc.bdi = bdi;
			writeback_inodes_wbc(&__wbc);
			goto restart;
		}
		if (must_wait)
			list_add_tail(&work->wait_list, &list);

		bdi_queue_work(bdi, work);
	}

	spin_unlock(&bdi_lock);

	/*
	 * If this is for WB_SYNC_ALL, wait for pending work to complete
	 * before returning.
	 */
	while (!list_empty(&list)) {
		work = list_entry(list.next, struct bdi_work, wait_list);
		list_del(&work->wait_list);
		bdi_wait_on_work_clear(work);
		call_rcu(&work->rcu_head, bdi_work_free);
	}
}
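/*
 * Design note (interpretation, not from the original file): the inline
 * fallback above trades latency for robustness. A GFP_ATOMIC allocation
 * is most likely to fail exactly when memory is short, which is also
 * when writeback is most needed, so writing the inodes directly from
 * the caller (and restarting the bdi scan) is preferable to dropping a
 * data-integrity request.
 */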
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.range_cyclic	= 1,
	};

	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	wbc.nr_to_write = nr_pages;
	bdi_writeback_all(&wbc);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
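/*
 * Example (illustrative; shows the common wrappers rather than new
 * behaviour): most callers do not use __mark_inode_dirty() directly:
 *
 *	mark_inode_dirty(inode);	// I_DIRTY: inode and its pages
 *	mark_inode_dirty_sync(inode);	// I_DIRTY_SYNC: e.g. timestamps
 *	__mark_inode_dirty(inode, I_DIRTY_PAGES); // data pages only, as
 *						  // set_page_dirty() does
 *
 * Only the I_DIRTY_SYNC/I_DIRTY_DATASYNC cases invoke the filesystem's
 * ->dirty_inode() hook; I_DIRTY_PAGES alone does not.
 */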
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct writeback_control *wbc)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	wbc.nr_to_write = nr_to_write;
	bdi_writeback_all(&wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);
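/*
 * Example (illustrative sketch; the s_dirt test is an assumption, not a
 * pattern taken from this file): a filesystem that wants to nudge
 * non-blocking writeback for its own superblock could do:
 *
 *	if (sb->s_dirt)
 *		writeback_inodes_sb(sb);
 *
 * The return value is a best-effort count of pages submitted; nothing
 * is guaranteed to be on disk when this returns. Use sync_inodes_sb()
 * below when completion must be guaranteed.
 */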
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX; /* doesn't actually matter */

	wbc.nr_to_write = nr_to_write;
	bdi_writeback_all(&wbc);
	wait_sb_inodes(&wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
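/*
 * Example (illustrative; the error handling shown is an assumption): the
 * knfsd-style use mentioned above is a synchronous commit of one inode,
 * e.g. before answering a COMMIT request:
 *
 *	int err = write_inode_now(inode, 1);	// 1 == synchronous
 *	if (err)
 *		return err;			// e.g. surfaces as -EIO
 *
 * With sync == 1 this writes the pages with WB_SYNC_ALL and then waits
 * in inode_sync_wait() until I_SYNC clears.
 */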
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what:  what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
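/*
 * Example (illustrative; mirrors the traditional O_SYNC write path, but
 * the exact flag combination is an assumption): a buffered write would
 * be followed by something like:
 *
 *	if (file->f_flags & O_SYNC) {
 *		int err = generic_osync_inode(inode, mapping,
 *					      OSYNC_METADATA | OSYNC_DATA);
 *		if (err)
 *			return err;
 *	}
 *
 * Adding OSYNC_INODE would also force out the inode itself even when
 * only non-data state (e.g. timestamps) is dirty.
 */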