/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_INPROGRESS = 0,
	WS_ONSTACK,
};

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	__set_bit(WS_INPROGRESS, &work->state);
}
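/*
 * Editorial example (not in the original source): a minimal sketch of how
 * this file builds a work item from a wb_writeback_args.  The numbers and
 * local variable names are illustrative only.
 *
 *	struct wb_writeback_args args = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_pages	= 1024,
 *		.range_cyclic	= 1,
 *	};
 *	struct bdi_work work;
 *
 *	bdi_work_init(&work, &args);		// copies args, sets WS_INPROGRESS
 *	__set_bit(WS_ONSTACK, &work.state);	// only for stack-allocated work
 */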
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	clear_bit(WS_INPROGRESS, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_INPROGRESS);

	if (!test_bit(WS_ONSTACK, &work->state))
		kfree(work);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		call_rcu(&work->rcu_head, bdi_work_free);
	}
}
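/*
 * Editorial note (not in the original source): the lifetime protocol above,
 * spelled out.  Each per-bdi flusher thread that must see a given work item
 * has a bit set in work->seen and contributes one count to work->pending.
 * A sketch of one thread's side, for a single-thread bdi:
 *
 *	work = get_next_work_item(bdi, wb);	// clears this thread's seen bit
 *	...do the writeback described by work->args...
 *	wb_clear_pending(wb, work);		// last ref: unlink + call_rcu
 *
 * Only after the RCU grace period does bdi_work_free() run, so a concurrent
 * list_for_each_entry_rcu() walker can never touch freed memory.
 */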
static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_done(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_queue_work_onstack - start and wait for writeback
 * @args: parameters to control the work queue writeback
 *
 * Description:
 *   This function initiates writeback and waits for the operation to
 *   complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_queue_work_onstack(struct wb_writeback_args *args)
{
	struct bdi_work work;

	bdi_work_init(&work, args);
	__set_bit(WS_ONSTACK, &work.state);

	bdi_queue_work(args->sb->s_bdi, &work);
	bdi_wait_on_work_done(&work);
}
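/*
 * Editorial note (not in the original source): the two submission paths
 * above differ only in work lifetime.  bdi_alloc_queue_work() is fire and
 * forget: WB_SYNC_NONE callers never wait, so the work is heap-allocated
 * and freed via RCU once every thread has dropped its reference.
 * bdi_queue_work_onstack() keeps the work on the caller's stack, which is
 * only safe because bdi_wait_on_work_done() blocks until WS_INPROGRESS is
 * cleared.  A sketch of the waiting style, mirroring sync_inodes_sb():
 *
 *	struct wb_writeback_args args = {
 *		.sb		= sb,
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_pages	= LONG_MAX,
 *	};
 *	bdi_queue_work_onstack(&args);	// returns only once acked
 */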
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= LONG_MAX,
		.for_background	= 1,
		.range_cyclic	= 1,
	};
	bdi_alloc_queue_work(bdi, &args);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}
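/*
 * Editorial worked example (not in the original source): b_dirty keeps the
 * most recently dirtied inode at the head (b_dirty.next).  Suppose the head
 * inode was dirtied at jiffies 1000 and we redirty an inode whose
 * dirtied_when is 900: time_before(900, 1000) is true, so its stamp is
 * refreshed to the current jiffies before the move, preserving the list's
 * time-ordering.  If instead its stamp were 1100 (it was redirtied while
 * under writeback), it is already the most recent and moves unrestamped.
 */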
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}
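/*
 * Editorial worked example (not in the original source): if the expired
 * inodes come off b_dirty interleaved as A1 B1 A2 B2 (A and B being two
 * superblocks), do_sb_sort is set and the second pass drains tmp one
 * superblock at a time: all of A's inodes are dispatched together, then
 * all of B's.  Grouping by superblock lets the caller pin a superblock
 * once per run of its inodes instead of once per inode.
 */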
/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}
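/*
 * Editorial example (not in the original source): write_inode() above just
 * dispatches to the filesystem.  A minimal, hypothetical ->write_inode
 * implementation might look like this (all myfs_* names are invented):
 *
 *	static int myfs_write_inode(struct inode *inode,
 *				    struct writeback_control *wbc)
 *	{
 *		struct buffer_head *bh = myfs_read_inode_block(inode);
 *
 *		if (!bh)
 *			return -EIO;
 *		myfs_fill_raw_inode(bh, inode);	// in-core state to disk format
 *		mark_buffer_dirty(bh);
 *		if (wbc->sync_mode == WB_SYNC_ALL)
 *			sync_dirty_buffer(bh);	// integrity sync: wait for it
 *		brelse(bh);
 *		return 0;
 *	}
 */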
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
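/*
 * Editorial example (not in the original source): the pin/unpin pairing
 * used by writeback_inodes_wb() below, reduced to its skeleton:
 *
 *	if (!pin_sb_for_writeback(sb)) {	// sb is going away, skip it
 *		requeue_io(inode);
 *		continue;
 *	}
 *	ret = writeback_sb_inodes(sb, wb, wbc, false);
 *	drop_super(sb);				// releases s_umount + s_count
 */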
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller's writeback routine should be
 * interrupted.  Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
			       struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}
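/*
 * Editorial worked example (not in the original source): wbc->wb_start is
 * sampled once, before b_io is filled.  Suppose wb_start = 5000 and a heavy
 * writer keeps redirtying an inode, restamping its dirtied_when to 5100,
 * 5200, and so on.  inode_dirtied_after(inode, 5000) is then true, so
 * writeback_sb_inodes() returns 1 and the scan terminates instead of
 * chasing pages dirtied after the scan began, which would livelock sync.
 */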
/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		if (args->sb)
			__writeback_inodes_sb(args->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
					   struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
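/*
 * Editorial worked example (not in the original source): the accounting in
 * the loop above.  Each pass grants a slice of MAX_WRITEBACK_PAGES (1024)
 * via wbc.nr_to_write, which do_writepages() decrements.  If a pass ends
 * with wbc.nr_to_write == 300, then 1024 - 300 = 724 pages were written:
 * args->nr_pages drops by 724 and wrote rises by 724.  A pass that ends
 * with nr_to_write still at 1024 wrote nothing, which is what triggers the
 * wait on b_more_io instead of a busy loop.
 */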
/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device, when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (!test_bit(WS_ONSTACK, &work->state))
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (test_bit(WS_ONSTACK, &work->state))
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		if (dirty_writeback_interval) {
			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout_interruptible(wait_jiffies);
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (list_empty_careful(&wb->bdi->work_list) &&
			    !kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}

		try_to_freeze();
	}

	return 0;
}
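/*
 * Editorial worked example (not in the original source): with the default
 * dirty_writeback_interval of 500 centisecs, wait_jiffies is
 * msecs_to_jiffies(5000), i.e. a 5 second nap per loop.  max_idle is then
 * max(5 minutes, 5 seconds) = 5 minutes: a flusher thread that writes
 * nothing for 5 minutes exits, and is re-forked on demand by the default
 * bdi thread once dirty data shows up again.
 */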
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
	};

	if (nr_pages) {
		args.nr_pages = nr_pages;
	} else {
		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		bdi_alloc_queue_work(bdi, &args);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
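/*
 * Editorial note (not in the original source): block_dump is the knob
 * behind /proc/sys/vm/block_dump.  When enabled, the helper above logs one
 * line per dirtying event, in the format of the printk above (values here
 * invented):
 *
 *	bash(1234): dirtied inode 5678 (foo.txt) on sda1
 *
 * It is traditionally used together with laptop_mode to find out what
 * keeps spinning up the disk.
 */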
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
				       bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
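/*
 * Editorial example (not in the original source): the wrappers callers are
 * told to use live in include/linux/fs.h and, in this era of the kernel,
 * are simply:
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */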
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync.  Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync call,
	 * but which had writeout started before we write them out.  In
	 * which case, the inode may not be on the dirty list, but we still
	 * have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock.  So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	args.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work_onstack(&args);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
1181d8a8559cSJens Axboe /**
118217bd55d0SEric Sandeen  * writeback_inodes_sb_if_idle - start writeback if none underway
118317bd55d0SEric Sandeen  * @sb: the superblock
118417bd55d0SEric Sandeen  *
118517bd55d0SEric Sandeen  * Invoke writeback_inodes_sb if no writeback is currently underway.
118617bd55d0SEric Sandeen  * Returns 1 if writeback was started, 0 if not.
118717bd55d0SEric Sandeen  */
118817bd55d0SEric Sandeen int writeback_inodes_sb_if_idle(struct super_block *sb)
118917bd55d0SEric Sandeen {
119017bd55d0SEric Sandeen 	if (!writeback_in_progress(sb->s_bdi)) {
1191cf37e972SChristoph Hellwig 		down_read(&sb->s_umount);
119217bd55d0SEric Sandeen 		writeback_inodes_sb(sb);
1193cf37e972SChristoph Hellwig 		up_read(&sb->s_umount);
119417bd55d0SEric Sandeen 		return 1;
119517bd55d0SEric Sandeen 	} else
119617bd55d0SEric Sandeen 		return 0;
119717bd55d0SEric Sandeen }
119817bd55d0SEric Sandeen EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
119917bd55d0SEric Sandeen 
120017bd55d0SEric Sandeen /**
1201d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1202d8a8559cSJens Axboe  * @sb: the superblock
1203d8a8559cSJens Axboe  *
1204d8a8559cSJens Axboe  * This function writes and waits on every dirty inode belonging to this
1205d8a8559cSJens Axboe  * super_block.
1206d8a8559cSJens Axboe  */
1207b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1208d8a8559cSJens Axboe {
12093c4d7165SChristoph Hellwig 	struct wb_writeback_args args = {
12103c4d7165SChristoph Hellwig 		.sb		= sb,
12113c4d7165SChristoph Hellwig 		.sync_mode	= WB_SYNC_ALL,
12123c4d7165SChristoph Hellwig 		.nr_pages	= LONG_MAX,
12133c4d7165SChristoph Hellwig 		.range_cyclic	= 0,
12143c4d7165SChristoph Hellwig 	};
12153c4d7165SChristoph Hellwig 
1216cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1217cf37e972SChristoph Hellwig 
12183c4d7165SChristoph Hellwig 	bdi_queue_work_onstack(&args);
1219b6e51316SJens Axboe 	wait_sb_inodes(sb);
1220d8a8559cSJens Axboe }
1221d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds /**
12241da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
12251da177e4SLinus Torvalds  * @inode: inode to write to disk
12261da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
12271da177e4SLinus Torvalds  *
12287f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
12297f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
12307f04c26dSAndrea Arcangeli  *
12317f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
12321da177e4SLinus Torvalds  */
12331da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
12341da177e4SLinus Torvalds {
12351da177e4SLinus Torvalds 	int ret;
12361da177e4SLinus Torvalds 	struct writeback_control wbc = {
12371da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
123818914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1239111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1240111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
12411da177e4SLinus Torvalds 	};
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
124449364ce2SAndrew Morton 		wbc.nr_to_write = 0;
12451da177e4SLinus Torvalds 
12461da177e4SLinus Torvalds 	might_sleep();
12471da177e4SLinus Torvalds 	spin_lock(&inode_lock);
124801c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, &wbc);
12491da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12501da177e4SLinus Torvalds 	if (sync)
12511c0eeaf5SJoern Engel 		inode_sync_wait(inode);
12521da177e4SLinus Torvalds 	return ret;
12531da177e4SLinus Torvalds }
12541da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
12551da177e4SLinus Torvalds 
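/*
 * Illustrative sketch, not part of the original file: knfsd-style use,
 * committing a single inode before acknowledging a stable write. With
 * sync != 0 the inode is written with WB_SYNC_ALL and then waited upon
 * in inode_sync_wait(). The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_commit_inode(struct inode *inode)
{
	return write_inode_now(inode, 1);	/* 1 == synchronous */
}
#endif
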
12561da177e4SLinus Torvalds /**
12571da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
12581da177e4SLinus Torvalds  * @inode: the inode to sync
12591da177e4SLinus Torvalds  * @wbc: controls the writeback mode
12601da177e4SLinus Torvalds  *
12611da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk. It will also
12621da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
12631da177e4SLinus Torvalds  * update inode->i_state.
12641da177e4SLinus Torvalds  *
12651da177e4SLinus Torvalds  * The caller must have a ref on the inode.
12661da177e4SLinus Torvalds  */
12671da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
12681da177e4SLinus Torvalds {
12691da177e4SLinus Torvalds 	int ret;
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds 	spin_lock(&inode_lock);
127201c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, wbc);
12731da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12741da177e4SLinus Torvalds 	return ret;
12751da177e4SLinus Torvalds }
12761da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
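
/*
 * Illustrative sketch, not part of the original file: sync_inode() with a
 * caller-built writeback_control, writing everything and waiting on it
 * (WB_SYNC_ALL), similar in effect to write_inode_now(inode, 1). The
 * helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_sync_one(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}
#endif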