/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	spin_lock(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, bool for_background)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task)
			wake_up_process(bdi->wb.task);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->for_background = for_background;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true, false);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	__bdi_start_writeback(bdi, LONG_MAX, true, true);
}
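
/*
 * Usage sketch (illustrative, not a verbatim caller): a dirtier that
 * notices the background dirty threshold has been crossed can kick the
 * flusher without blocking, roughly:
 *
 *	if (!writeback_in_progress(bdi))
 *		bdi_start_background_writeback(bdi);
 *
 * bdi_start_writeback(bdi, nr_pages) is the bounded variant of the same
 * mechanism; both merely queue WB_SYNC_NONE work for the flusher task.
 */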

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
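
/*
 * Worked example for the wraparound test above (32-bit jiffies,
 * hypothetical numbers): an inode dirtied shortly after boot has
 * dirtied_when == 100.  Once more than 2^31 ticks have elapsed,
 * time_after(dirtied_when, t) turns spuriously true for any recent t,
 * so the inode would never look "expired".  In exactly that situation
 * dirtied_when also appears to be ahead of jiffies, so the extra
 * time_before_eq(dirtied_when, jiffies) check is false, the inode is
 * treated as expired, and writeback keeps making progress.
 */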

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via a syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, so clear the dirty metadata flags right before
	 * write_inode().
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
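
/*
 * Informal summary of the requeue decisions above, for reference:
 *
 *	pages still dirty, kupdate pass, slice used up	-> b_more_io
 *	pages still dirty, kupdate pass, blocked	-> tail of b_dirty
 *	inode metadata redirtied (I_DIRTY)		-> tail of b_dirty
 *	pages skipped, non-kupdate pass			-> tail of b_dirty
 *	clean and referenced				-> inode_in_use
 *	clean and unreferenced				-> inode_unused
 */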

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback.  So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes.  Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller writeback routine should be
 * interrupted; otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from creating extra jobs and livelocking.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
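
/*
 * Back-of-the-envelope figure (assuming 4KiB pages): 1024 pages is 4MiB
 * of data per round trip through the wbc, so the flusher re-checks the
 * work and background-threshold conditions at least once every 4MiB
 * written.
 */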

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
					   struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock(&bdi->wb_lock);
	return work;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
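
/*
 * Interval arithmetic above, for reference: dirty_writeback_interval is
 * kept in centiseconds (it backs the vm.dirty_writeback_centisecs
 * sysctl), so the "* 10" converts it to milliseconds; the usual default
 * of 500 yields a 5-second kupdate-style flush period.
 */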

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		if (dirty_writeback_interval) {
			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout_interruptible(wait_jiffies);
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (list_empty_careful(&wb->bdi->work_list) &&
			    !kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}

		try_to_freeze();
	}

	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, false);
	}
	rcu_read_unlock();
}
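
/*
 * Usage sketch (illustrative): the sync(2) path wakes every flusher for
 * all dirty data with
 *
 *	wakeup_flusher_threads(0);
 *
 * while a caller that only wants a bounded amount of IO started can pass
 * an explicit page count.  Either way the work is WB_SYNC_NONE and is
 * processed asynchronously by the per-bdi flusher tasks.
 */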

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & I_FREEING)
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
				       bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
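
/*
 * Callers normally reach this through the wrappers in include/linux/fs.h
 * rather than calling it directly:
 *
 *	mark_inode_dirty(inode);	==> __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	==> __mark_inode_dirty(inode, I_DIRTY_SYNC)
 */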

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In that case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from the s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock.  So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block.  No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	work.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
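
/*
 * Note the locking difference between the two helpers above:
 * writeback_inodes_sb() warns unless the caller already holds
 * sb->s_umount, while writeback_inodes_sb_if_idle() takes the read
 * lock around the call itself.
 */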

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
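
/*
 * Usage sketch for the two exported single-inode helpers (illustrative,
 * not taken from a real caller): to push an inode's dirty pages out
 * without waiting, capping the amount of IO submitted:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 16,
 *	};
 *	sync_inode(inode, &wbc);
 *
 * whereas write_inode_now(inode, 1) is the simple synchronous
 * flush-everything variant built on the same writeback_single_inode().
 */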