/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;
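/*
 * Example (illustrative, not part of the original file): a caller that
 * needs synchronous semantics fills a wb_writeback_work on its stack and
 * waits on ->done, exactly as sync_inodes_sb() does near the end of this
 * file:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.sb		= sb,
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_pages	= LONG_MAX,
 *		.done		= &done,
 *	};
 *
 *	bdi_queue_work(sb->s_bdi, &work);
 *	wait_for_completion(&done);
 *
 * Asynchronous callers kzalloc() the work instead and leave ->done NULL,
 * in which case the flusher thread kfree()s it when finished.
 */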
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
		struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		trace_writeback_nothread(bdi, work);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		bool range_cyclic, bool for_background)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->for_background = for_background;

	bdi_queue_work(bdi, work);
}
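/*
 * Example (illustrative): since __bdi_start_writeback() queues work
 * unconditionally, callers that only want to nudge an idle bdi can gate
 * it on writeback_in_progress(), the pattern used by
 * writeback_inodes_sb_if_idle() later in this file:
 *
 *	if (!writeback_in_progress(bdi))
 *		__bdi_start_writeback(bdi, 1024, true, false);
 */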
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true, false);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	__bdi_start_writeback(bdi, LONG_MAX, true, true);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}
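/*
 * Example (illustrative): b_dirty is ordered by ->dirtied_when, newest at
 * the head.  Suppose the head inode was stamped at jiffies-10 and we
 * redirty an inode stamped at jiffies-100: leaving the old stamp would
 * put an "older" inode at the newest end, so redirty_tail() refreshes the
 * stamp to the current jiffies before the list_move().
 */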
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
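/*
 * Example (illustrative): on 32-bit with HZ=1000, jiffies wraps roughly
 * every 49.7 days (2^32 ms).  An inode whose ->dirtied_when got stuck
 * far enough in the past can satisfy time_after(dirtied_when, t) even
 * though the stamp is ancient; the extra time_before_eq(dirtied_when,
 * jiffies) check above rejects such stamps, because they also appear to
 * lie in jiffies' future.
 */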
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}
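/*
 * Example (illustrative): if b_dirty holds expired inodes A1, B1, A2
 * (from superblocks A and B), move_expired_inodes() above notices the
 * superblock change, sets do_sb_sort, and dispatches the inodes grouped
 * per superblock, e.g. A1, A2, B1, so that writeback_sb_inodes() does
 * not keep bouncing between superblocks.
 */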
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
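/*
 * Example (illustrative): the expected caller pattern, as used by
 * writeback_inodes_wb() below:
 *
 *	if (pin_sb_for_writeback(sb)) {
 *		writeback_sb_inodes(sb, wb, wbc, false);
 *		drop_super(sb);
 *	} else
 *		requeue_io(inode);
 *
 * pin_sb_for_writeback() takes both an s_count reference and the s_umount
 * read lock; drop_super() releases them again.
 */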
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller's writeback routine should be
 * interrupted.  Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after our writeback pass started?
		 * This keeps sync from doing extra jobs and from livelocking.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	int ret = 0;

	if (!wbc->wb_start)
		wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}
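/*
 * Example (illustrative sketch): a single WB_SYNC_NONE pass over one
 * bdi's dirty inodes, mirroring what each wb_writeback() iteration below
 * sets up:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= MAX_WRITEBACK_PAGES,
 *		.range_cyclic	= 1,
 *	};
 *
 *	writeback_inodes_wb(wb, &wbc);
 *
 * The zero-initialized ->wb_start makes writeback_inodes_wb() stamp the
 * pass with the current jiffies for livelock avoidance.
 */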
/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	wbc.wb_start = jiffies; /* livelock avoidance */
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
						struct inode, i_list);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
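/*
 * Worked example (illustrative): with MAX_WRITEBACK_PAGES == 1024, an
 * iteration that ends with wbc.nr_to_write == 200 wrote 824 pages, so
 * work->nr_pages drops by 824.  Since something was written but the
 * slice was not exhausted, the loop above continues only if wbc.more_io
 * says there is more work queued; if nothing at all was written, it
 * waits on the oldest b_more_io inode instead of busylooping.
 */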
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list)) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, false);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
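/*
 * Example (illustrative): sys_sync() kicks every bdi this way before
 * doing its synchronous pass, with nr_pages == 0 meaning "all dirty
 * pages":
 *
 *	wakeup_flusher_threads(0);
 */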
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;
	bool wakeup_bdi = false;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & I_FREEING)
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &bdi->wb.b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);

	if (wakeup_bdi)
		bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
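/*
 * Example (illustrative): filesystems rarely call this directly; they go
 * through the mark_inode_dirty()/mark_inode_dirty_sync() wrappers, e.g.
 * after a timestamp update:
 *
 *	inode->i_mtime = CURRENT_TIME;
 *	mark_inode_dirty_sync(inode);
 *
 * which ends up here as __mark_inode_dirty(inode, I_DIRTY_SYNC).
 */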
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	work.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
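/*
 * Example (illustrative assumption, based on how filesystems of this era
 * used the helper): a filesystem running short on space for delayed
 * allocation can try flushing dirty data first, without stacking
 * redundant work:
 *
 *	if (free_blocks < needed)
 *		writeback_inodes_sb_if_idle(sb);
 *
 * free_blocks/needed are hypothetical names standing in for the caller's
 * own accounting.
 */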
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
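/*
 * Example (illustrative): a knfsd-style caller forcing a dirty inode to
 * disk before replying to a client:
 *
 *	err = write_inode_now(inode, 1);
 *
 * sync == 1 selects WB_SYNC_ALL and makes the call wait on I_SYNC via
 * inode_sync_wait() before returning.
 */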
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
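/*
 * Example (illustrative): a metadata-only flush, similar to what the
 * simple fsync helpers elsewhere in the kernel build; nr_to_write == 0
 * skips the data pages and lets write_inode() push just the inode
 * itself:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= 0,
 *	};
 *	int err = sync_inode(inode, &wbc);
 */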