/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  i.e. data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;
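/*
 * Editor's illustration (not part of the original file): a minimal sketch
 * of how a synchronous work item is built.  This mirrors what
 * writeback_inodes_sb() and sync_inodes_sb() do further down: the work
 * struct lives on the caller's stack, and ->done lets the caller block
 * until the flusher thread has processed it.  bdi_queue_work() is the
 * static helper defined below.
 */
#if 0	/* example only */
static void example_sync_work(struct backing_dev_info *bdi,
			      struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= 1024,
		.done		= &done,
	};

	bdi_queue_work(bdi, &work);
	wait_for_completion(&done);	/* woken from wb_do_writeback() */
}
#endif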
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		trace_writeback_nothread(bdi, work);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, bool for_background)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->for_background = for_background;

	bdi_queue_work(bdi, work);
}
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true, false);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	__bdi_start_writeback(bdi, LONG_MAX, true, true);
}
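/*
 * Editor's illustration (not part of the original file): both wrappers
 * above funnel into __bdi_start_writeback().  A hypothetical caller that
 * wants a bounded opportunistic flush, or an open-ended background flush,
 * would simply do:
 */
#if 0	/* example only */
static void example_kick_flusher(struct backing_dev_info *bdi)
{
	/* write out up to 1024 pages, cycling over each mapping */
	bdi_start_writeback(bdi, 1024);

	/* or: keep writing until below the background dirty threshold */
	bdi_start_background_writeback(bdi);
}
#endif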
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * Requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
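/*
 * Editor's illustration (not part of the original file): why the extra
 * 32-bit check above matters.  On 32-bit, a jiffies timestamp that is
 * never refreshed eventually wraps so far that time_after() reports it
 * as being "in the future" again; comparing against the current jiffies
 * value filters such stale stamps out.  A sketch of the same pattern:
 */
#if 0	/* example only */
static void example_wraparound(unsigned long dirtied_when)
{
	unsigned long thirty_secs_ago = jiffies - msecs_to_jiffies(30 * 1000);

	/* a very old, wrapped dirtied_when fails the second test */
	if (time_after(dirtied_when, thirty_secs_ago) &&
	    time_before_eq(dirtied_when, jiffies))
		printk(KERN_DEBUG "dirtied within the last 30 seconds\n");
}
#endif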
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, &wb->b_io);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}
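/*
 * Editor's illustration (not part of the original file): the waiter above
 * pairs with inode_sync_complete().  The flusher owns I_SYNC while it is
 * writing the inode; anyone who needs the inode quiescent waits on the
 * bit under inode_lock.  A sketch of the protocol:
 */
#if 0	/* example only */
static void example_wait_until_clean(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (inode->i_state & I_SYNC)
		inode_wait_for_writeback(inode);	/* drops/retakes lock */
	/* I_SYNC is clear here; inspect or requeue the inode safely */
	spin_unlock(&inode_lock);
}
#endif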
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If wbc->sync_mode is WB_SYNC_ALL, we wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
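/*
 * Editor's illustration (not part of the original file): the intended
 * pin/unpin pattern around per-sb writeback, as used by
 * writeback_inodes_wb() below.  drop_super() releases both s_umount and
 * the s_count reference taken in pin_sb_for_writeback().
 */
#if 0	/* example only */
static void example_write_pinned_sb(struct super_block *sb,
				    struct bdi_writeback *wb,
				    struct writeback_control *wbc)
{
	if (!pin_sb_for_writeback(sb))
		return;		/* sb is going away, skip it */
	writeback_sb_inodes(sb, wb, wbc, false);
	drop_super(sb);
}
#endif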
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1, if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after our writeback pass started?
		 * This keeps sync from doing extra jobs and livelocking.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers. Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}
void writeback_inodes_wb(struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	int ret = 0;

	if (!wbc->wb_start)
		wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}
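/*
 * Editor's illustration (not part of the original file): how the pieces
 * above combine.  balance_dirty_pages() in mm/page-writeback.c makes a
 * similar check before kicking background writeback; a simplified sketch:
 */
#if 0	/* example only */
static void example_maybe_start_background(struct backing_dev_info *bdi)
{
	if (over_bground_thresh() && !writeback_in_progress(bdi))
		bdi_start_background_writeback(bdi);
}
#endif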
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	wbc.wb_start = jiffies; /* livelock avoidance */
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
					   struct inode, i_list);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
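/*
 * Editor's note (not part of the original file): dirty_writeback_interval
 * and dirty_expire_interval are expressed in centiseconds (hence the
 * "* 10" before msecs_to_jiffies() above).  A sketch of the conversion:
 */
#if 0	/* example only */
static unsigned long example_next_flush(struct bdi_writeback *wb)
{
	/* e.g. the default 500 centisecs == 5000 ms == 5 seconds */
	unsigned long interval =
		msecs_to_jiffies(dirty_writeback_interval * 10);

	return wb->last_old_flush + interval;
}
#endif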
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list)) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so we can sleep without any
			 * timeout and save power.  When work is queued or an
			 * inode is dirtied, we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}


/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, false);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
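/*
 * Editor's illustration (not part of the original file):
 * wakeup_flusher_threads() is the "write back everything, everywhere"
 * entry point; sys_sync() uses it with nr_pages == 0.  A sketch:
 */
#if 0	/* example only */
static void example_flush_everything(void)
{
	/* 0 means: target all currently dirty + unstable pages */
	wakeup_flusher_threads(0);
}
#endif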
/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;
	bool wakeup_bdi = false;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & I_FREEING)
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &bdi->wb.b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);

	if (wakeup_bdi)
		bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
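/*
 * Editor's illustration (not part of the original file): callers normally
 * reach this through the mark_inode_dirty()/mark_inode_dirty_sync()
 * wrappers in <linux/fs.h>; a filesystem updating timestamps might do:
 */
#if 0	/* example only */
static void example_touch_inode(struct inode *inode)
{
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	/* equivalent to __mark_inode_dirty(inode, I_DIRTY_SYNC) */
	mark_inode_dirty_sync(inode);
}
#endif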
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	work.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
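/*
 * Editor's illustration (not part of the original file): a filesystem that
 * is about to run out of space can use the _if_idle variant to nudge
 * writeback without piling up redundant requests (ext4 does something
 * similar when delalloc reserves run low).  A sketch:
 */
#if 0	/* example only */
static void example_on_low_space(struct super_block *sb)
{
	if (writeback_inodes_sb_if_idle(sb))
		pr_debug("kicked writeback on %s\n", sb->s_id);
}
#endif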
/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
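/*
 * Editor's illustration (not part of the original file): knfsd-style use
 * of write_inode_now() to push a single inode out synchronously:
 */
#if 0	/* example only */
static int example_commit_inode(struct inode *inode)
{
	/* second argument != 0: WB_SYNC_ALL, wait for the writeout */
	return write_inode_now(inode, 1);
}
#endif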
/**
 * sync_inode	-	write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
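/*
 * Editor's illustration (not part of the original file): sync_inode() with
 * a caller-supplied writeback_control, e.g. to write a bounded number of
 * pages without waiting on them:
 */
#if 0	/* example only */
static int example_sync_some_pages(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 64,		/* cap the slice */
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}
#endif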