/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
        long nr_pages;
        struct super_block *sb;
        enum writeback_sync_modes sync_mode;
        unsigned int for_kupdate:1;
        unsigned int range_cyclic:1;
        unsigned int for_background:1;

        struct list_head list;          /* pending work list */
        struct completion *done;        /* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
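/*
 * Lifecycle of a wb_writeback_work item, as implemented below: it is either
 * allocated with kzalloc() in __bdi_start_writeback() or placed on the
 * caller's stack (writeback_inodes_sb_nr(), sync_inodes_sb()), then linked
 * onto bdi->work_list by bdi_queue_work().  The per-bdi flusher thread
 * dequeues it in wb_do_writeback() via get_next_work_item(), runs
 * wb_writeback() on it, and finally either completes ->done for a waiting
 * caller or kfree()s the item for fire-and-forget submitters.
 */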
/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (strcmp(sb->s_type->name, "bdev") == 0)
                return inode->i_mapping->backing_dev_info;

        return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
        return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
        if (bdi->wb.task) {
                wake_up_process(bdi->wb.task);
        } else {
                /*
                 * The bdi thread isn't there, wake up the forker thread which
                 * will create and run it.
                 */
                wake_up_process(default_backing_dev_info.wb.task);
        }
}

static void bdi_queue_work(struct backing_dev_info *bdi,
                           struct wb_writeback_work *work)
{
        trace_writeback_queue(bdi, work);

        spin_lock_bh(&bdi->wb_lock);
        list_add_tail(&work->list, &bdi->work_list);
        if (!bdi->wb.task)
                trace_writeback_nothread(bdi, work);
        bdi_wakeup_flusher(bdi);
        spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                      bool range_cyclic)
{
        struct wb_writeback_work *work;

        /*
         * This is WB_SYNC_NONE writeback, so if allocation fails just
         * wakeup the thread for old dirty data writeback
         */
        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
                if (bdi->wb.task) {
                        trace_writeback_nowork(bdi);
                        wake_up_process(bdi->wb.task);
                }
                return;
        }

        work->sync_mode    = WB_SYNC_NONE;
        work->nr_pages     = nr_pages;
        work->range_cyclic = range_cyclic;

        bdi_queue_work(bdi, work);
}
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
        __bdi_start_writeback(bdi, nr_pages, true);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
        /*
         * We just wake up the flusher thread. It will perform background
         * writeback as soon as there is no other work to do.
         */
        spin_lock_bh(&bdi->wb_lock);
        bdi_wakeup_flusher(bdi);
        spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = wb_inode(wb->b_dirty.next);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_wb_list, &wb->b_dirty);
}
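/*
 * Note the division of labour between redirty_tail() above and requeue_io()
 * below: redirty_tail() re-stamps dirtied_when (unless doing so would break
 * the time-ordering of b_dirty) and parks the inode at the tail of b_dirty,
 * so it waits out a full expiry interval again, while requeue_io() moves
 * the inode to b_more_io, from where queue_io() feeds it straight back into
 * b_io for another pass in the current writeback cycle.
 */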
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        /*
         * Prevent speculative execution through spin_unlock(&inode_lock);
         */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole bdi writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
                                struct list_head *dispatch_queue,
                                unsigned long *older_than_this)
{
        LIST_HEAD(tmp);
        struct list_head *pos, *node;
        struct super_block *sb = NULL;
        struct inode *inode;
        int do_sb_sort = 0;

        while (!list_empty(delaying_queue)) {
                inode = wb_inode(delaying_queue->prev);
                if (older_than_this &&
                    inode_dirtied_after(inode, *older_than_this))
                        break;
                if (sb && sb != inode->i_sb)
                        do_sb_sort = 1;
                sb = inode->i_sb;
                list_move(&inode->i_wb_list, &tmp);
        }

        /* just one sb in list, splice to dispatch_queue and we're done */
        if (!do_sb_sort) {
                list_splice(&tmp, dispatch_queue);
                return;
        }

        /* Move inodes from one superblock together */
        while (!list_empty(&tmp)) {
                sb = wb_inode(tmp.prev)->i_sb;
                list_for_each_prev_safe(pos, node, &tmp) {
                        inode = wb_inode(pos);
                        if (inode->i_sb == sb)
                                list_move(&inode->i_wb_list, dispatch_queue);
                }
        }
}
/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
        list_splice_init(&wb->b_more_io, &wb->b_io);
        move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, wbc);
        return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode_lock);
        }
}
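/*
 * The I_SYNC bit acts as the per-inode writeback lock:
 * writeback_single_inode() sets it under inode_lock before dropping the
 * lock for the actual I/O, and inode_sync_complete() wakes anyone sleeping
 * in inode_wait_for_writeback() after it has been cleared.  Non-integrity
 * writeback never waits on it; it just requeues the busy inode.
 */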
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        unsigned dirty;
        int ret;

        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                /*
                 * If this inode is locked for writeback and we are not doing
                 * writeback-for-data-integrity, move it to b_more_io so that
                 * writeback can proceed with the other inodes on s_io.
                 *
                 * We'll have another go at writing back this inode when we
                 * completed a full scan of b_io.
                 */
                if (wbc->sync_mode != WB_SYNC_ALL) {
                        requeue_io(inode);
                        return 0;
                }

                /*
                 * It's a data-integrity sync.  We must wait.
                 */
                inode_wait_for_writeback(inode);
        }

        BUG_ON(inode->i_state & I_SYNC);

        /* Set I_SYNC, reset I_DIRTY_PAGES */
        inode->i_state |= I_SYNC;
        inode->i_state &= ~I_DIRTY_PAGES;
        spin_unlock(&inode_lock);
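        /*
         * inode_lock is dropped for the duration of the page I/O below; the
         * I_SYNC bit set above is what keeps concurrent writeback of this
         * inode away while the lock is not held.
         */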
        ret = do_writepages(mapping, wbc);

        /*
         * Make sure to wait on the data before writing out the metadata.
         * This is important for filesystems that modify metadata on data
         * I/O completion.
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        /*
         * Some filesystems may redirty the inode during the writeback
         * due to delalloc, clear dirty metadata flags right before
         * write_inode()
         */
        spin_lock(&inode_lock);
        dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        spin_unlock(&inode_lock);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wbc);
                if (ret == 0)
                        ret = err;
        }

        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & I_FREEING)) {
                if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages.  nfs_writepages()
                         * sometimes bails out without doing anything.
                         */
                        inode->i_state |= I_DIRTY_PAGES;
                        if (wbc->nr_to_write <= 0) {
                                /*
                                 * slice used up: queue for next turn
                                 */
                                requeue_io(inode);
                        } else {
                                /*
                                 * Writeback blocked by something other than
                                 * congestion. Delay the inode for some time to
                                 * avoid spinning on the CPU (100% iowait)
                                 * retrying writeback of the dirty page/inode
                                 * that cannot be performed immediately.
                                 */
                                redirty_tail(inode);
                        }
                } else if (inode->i_state & I_DIRTY) {
                        /*
                         * Filesystems can dirty the inode during writeback
                         * operations, such as delayed allocation during
                         * submission or metadata updates after data IO
                         * completion.
                         */
                        redirty_tail(inode);
                } else {
                        /*
                         * The inode is clean.  At this point we either have
                         * a reference to the inode or it's on its way out.
                         * No need to add it back to the LRU.
                         */
                        list_del_init(&inode->i_wb_list);
                }
        }
        inode_sync_complete(inode);
        return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (list_empty(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root)
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}
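/*
 * The pinning protocol above mirrors what callers of writeback_inodes_sb()
 * get from holding s_umount: s_count is bumped under sb_lock so the
 * superblock cannot be freed, then s_umount is tried shared.  If the
 * trylock fails (e.g. an umount is in flight), the caller simply requeues
 * the inode and moves on to the next superblock.
 */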
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1, if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                struct writeback_control *wbc, bool only_this_sb)
{
        while (!list_empty(&wb->b_io)) {
                long pages_skipped;
                struct inode *inode = wb_inode(wb->b_io.prev);

                if (inode->i_sb != sb) {
                        if (only_this_sb) {
                                /*
                                 * We only want to write back data for this
                                 * superblock, move all inodes not belonging
                                 * to it back onto the dirty list.
                                 */
                                redirty_tail(inode);
                                continue;
                        }

                        /*
                         * The inode belongs to a different superblock.
                         * Bounce back to the caller to unpin this and
                         * pin the next superblock.
                         */
                        return 0;
                }

                /*
                 * Don't bother with new inodes or inodes being freed: the
                 * first kind does not need periodic writeout yet, and for the
                 * latter kind writeout is handled by the freer.
                 */
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }

                /*
                 * Was this inode dirtied after sync_sb_inodes was called?
                 * This keeps sync from extra jobs and livelock.
                 */
                if (inode_dirtied_after(inode, wbc->wb_start))
                        return 1;

                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                writeback_single_inode(inode, wbc);
                if (wbc->pages_skipped != pages_skipped) {
                        /*
                         * writeback is not making progress due to locked
                         * buffers.  Skip this inode for now.
                         */
                        redirty_tail(inode);
                }
                spin_unlock(&inode_lock);
                iput(inode);
                cond_resched();
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0) {
                        wbc->more_io = 1;
                        return 1;
                }
                if (!list_empty(&wb->b_more_io))
                        wbc->more_io = 1;
        }
        /* b_io is empty */
        return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
                struct writeback_control *wbc)
{
        int ret = 0;

        if (!wbc->wb_start)
                wbc->wb_start = jiffies; /* livelock avoidance */
        spin_lock(&inode_lock);
        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;

                if (!pin_sb_for_writeback(sb)) {
                        requeue_io(inode);
                        continue;
                }
                ret = writeback_sb_inodes(sb, wb, wbc, false);
                drop_super(sb);

                if (ret)
                        break;
        }
        spin_unlock(&inode_lock);
        /* Leave any unwritten inodes on b_io */
}
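/*
 * Two callers drive writeback_sb_inodes() above: writeback_inodes_wb()
 * walks a bdi's b_io across all superblocks, pinning each one as it goes,
 * while __writeback_inodes_sb() below handles exactly one superblock whose
 * s_umount the caller is already required to hold.
 */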
static void __writeback_inodes_sb(struct super_block *sb,
                struct bdi_writeback *wb, struct writeback_control *wbc)
{
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        spin_lock(&inode_lock);
        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);
        writeback_sb_inodes(sb, wb, wbc, true);
        spin_unlock(&inode_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
        unsigned long background_thresh, dirty_thresh;

        global_dirty_limits(&background_thresh, &dirty_thresh);

        return (global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}
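/*
 * With 4KB pages, MAX_WRITEBACK_PAGES above works out to 4MB of data per
 * batch; the wb_writeback() loop below resets nr_to_write and rechecks the
 * background threshold between batches, so no single inode can monopolize
 * the flusher for long.
 */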
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_work *work)
{
        struct writeback_control wbc = {
                .sync_mode              = work->sync_mode,
                .older_than_this        = NULL,
                .for_kupdate            = work->for_kupdate,
                .for_background         = work->for_background,
                .range_cyclic           = work->range_cyclic,
        };
        unsigned long oldest_jif;
        long wrote = 0;
        struct inode *inode;

        if (wbc.for_kupdate) {
                wbc.older_than_this = &oldest_jif;
                oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
        }
        if (!wbc.range_cyclic) {
                wbc.range_start = 0;
                wbc.range_end = LLONG_MAX;
        }

        wbc.wb_start = jiffies; /* livelock avoidance */
        for (;;) {
                /*
                 * Stop writeback when nr_pages has been consumed
                 */
                if (work->nr_pages <= 0)
                        break;

                /*
                 * For background writeout, stop when we are below the
                 * background dirty threshold
                 */
                if (work->for_background && !over_bground_thresh())
                        break;

                wbc.more_io = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;

                trace_wbc_writeback_start(&wbc, wb->bdi);
                if (work->sb)
                        __writeback_inodes_sb(work->sb, wb, &wbc);
                else
                        writeback_inodes_wb(wb, &wbc);
                trace_wbc_writeback_written(&wbc, wb->bdi);

                work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

                /*
                 * If we consumed everything, see if we have more
                 */
                if (wbc.nr_to_write <= 0)
                        continue;
                /*
                 * Didn't write everything and we don't have more IO, bail
                 */
                if (!wbc.more_io)
                        break;
                /*
                 * Did we write something? Try for more
                 */
                if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
                        continue;
                /*
                 * Nothing written. Wait for some inode to
                 * become available for writeback. Otherwise
                 * we'll just busyloop.
                 */
                spin_lock(&inode_lock);
                if (!list_empty(&wb->b_more_io)) {
                        inode = wb_inode(wb->b_more_io.prev);
                        trace_wbc_writeback_wait(&wbc, wb->bdi);
                        inode_wait_for_writeback(inode);
                }
                spin_unlock(&inode_lock);
        }

        return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
        struct wb_writeback_work *work = NULL;

        spin_lock_bh(&bdi->wb_lock);
        if (!list_empty(&bdi->work_list)) {
                work = list_entry(bdi->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
        spin_unlock_bh(&bdi->wb_lock);
        return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
        return global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) +
                get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
        if (over_bground_thresh()) {

                struct wb_writeback_work work = {
                        .nr_pages       = LONG_MAX,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_background = 1,
                        .range_cyclic   = 1,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        /*
         * When set to zero, disable periodic writeback
         */
        if (!dirty_writeback_interval)
                return 0;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = get_nr_dirty_pages();

        if (nr_pages) {
                struct wb_writeback_work work = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_kupdate    = 1,
                        .range_cyclic   = 1,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct wb_writeback_work *work;
        long wrote = 0;

        set_bit(BDI_writeback_running, &wb->bdi->state);
        while ((work = get_next_work_item(bdi)) != NULL) {
                /*
                 * Override sync mode, in case we must wait for completion
                 * because this thread is exiting now.
                 */
                if (force_wait)
                        work->sync_mode = WB_SYNC_ALL;

                trace_writeback_exec(bdi, work);

                wrote += wb_writeback(wb, work);

                /*
                 * Notify the caller of completion if this is a synchronous
                 * work item, otherwise just free it.
                 */
                if (work->done)
                        complete(work->done);
                else
                        kfree(work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);
        wrote += wb_check_background_flush(wb);
        clear_bit(BDI_writeback_running, &wb->bdi->state);

        return wrote;
}
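/*
 * After draining the explicit work list, wb_do_writeback() above always
 * gives the two implicit kinds of writeback a chance: the kupdate-style
 * flush of data older than dirty_expire_interval, and a background flush
 * when the system is over its background dirty threshold.
 */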
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
        struct bdi_writeback *wb = data;
        struct backing_dev_info *bdi = wb->bdi;
        long pages_written;

        current->flags |= PF_SWAPWRITE;
        set_freezable();
        wb->last_active = jiffies;

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        trace_writeback_thread_start(bdi);

        while (!kthread_should_stop()) {
                /*
                 * Remove own delayed wake-up timer, since we are already awake
                 * and we'll take care of the periodic write-back.
                 */
                del_timer(&wb->wakeup_timer);

                pages_written = wb_do_writeback(wb, 0);

                trace_writeback_pages_written(pages_written);

                if (pages_written)
                        wb->last_active = jiffies;

                set_current_state(TASK_INTERRUPTIBLE);
                if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }

                if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                        schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                else {
                        /*
                         * We have nothing to do, so can go sleep without any
                         * timeout and save power. When a work is queued or
                         * something is made dirty - we will be woken up.
                         */
                        schedule();
                }

                try_to_freeze();
        }

        /* Flush any work that raced with us exiting */
        if (!list_empty(&bdi->work_list))
                wb_do_writeback(wb, 1);

        trace_writeback_thread_stop(bdi);
        return 0;
}
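/*
 * The idle behaviour of the thread above is two-tier: with dirty inodes
 * still pending it naps for dirty_writeback_interval and retries, whereas
 * with nothing to do it sleeps indefinitely in schedule() and relies on
 * being woken when work is queued or when __mark_inode_dirty() arms the
 * delayed wakeup_timer (which the del_timer() at the top of the loop
 * cancels once the thread is awake).
 */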
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
        struct backing_dev_info *bdi;

        if (!nr_pages) {
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;
                __bdi_start_writeback(bdi, nr_pages, false);
        }
        rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.
 * This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;
        struct backing_dev_info *bdi = NULL;
        bool wakeup_bdi = false;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (inode_unhashed(inode))
                                goto out;
                }
                if (inode->i_state & I_FREEING)
                        goto out;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        bdi = inode_to_bdi(inode);

                        if (bdi_cap_writeback_dirty(bdi)) {
                                WARN(!test_bit(BDI_registered, &bdi->state),
                                     "bdi-%s not registered\n", bdi->name);

                                /*
                                 * If this is the first dirty inode for this
                                 * bdi, we have to wake-up the corresponding
                                 * bdi thread to make sure background
                                 * write-back happens later.
                                 */
                                if (!wb_has_dirty_io(&bdi->wb))
                                        wakeup_bdi = true;
                        }

                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
                }
        }
out:
        spin_unlock(&inode_lock);

        if (wakeup_bdi)
                bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        spin_lock(&inode_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call, but which had writeout started before we write it out.
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping;

                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
                        continue;
                mapping = inode->i_mapping;
                if (mapping->nrpages == 0)
                        continue;
                __iget(inode);
                spin_unlock(&inode_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have
                 * been removed from s_inodes list while we dropped the
                 * inode_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it
                 * under inode_lock. So we keep the reference and iput
                 * it later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_lock);
        }
        spin_unlock(&inode_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_NONE,
                .done           = &done,
                .nr_pages       = nr,
        };

        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
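/*
 * Note that writeback_inodes_sb_nr() above can keep its work item on the
 * stack only because it waits on ->done: wait_for_completion() guarantees
 * the flusher thread is finished with the item before the stack frame goes
 * away.
 */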
11463259f8beSChris Mason  */
11473259f8beSChris Mason void writeback_inodes_sb(struct super_block *sb)
11483259f8beSChris Mason {
1149925d169fSLinus Torvalds 	writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
11503259f8beSChris Mason }
1151d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1152d8a8559cSJens Axboe 
1153d8a8559cSJens Axboe /**
115417bd55d0SEric Sandeen  * writeback_inodes_sb_if_idle - start writeback if none underway
115517bd55d0SEric Sandeen  * @sb: the superblock
115617bd55d0SEric Sandeen  *
115717bd55d0SEric Sandeen  * Invoke writeback_inodes_sb if no writeback is currently underway.
115817bd55d0SEric Sandeen  * Returns 1 if writeback was started, 0 if not.
115917bd55d0SEric Sandeen  */
116017bd55d0SEric Sandeen int writeback_inodes_sb_if_idle(struct super_block *sb)
116117bd55d0SEric Sandeen {
116217bd55d0SEric Sandeen 	if (!writeback_in_progress(sb->s_bdi)) {
1163cf37e972SChristoph Hellwig 		down_read(&sb->s_umount);
116417bd55d0SEric Sandeen 		writeback_inodes_sb(sb);
1165cf37e972SChristoph Hellwig 		up_read(&sb->s_umount);
116617bd55d0SEric Sandeen 		return 1;
116717bd55d0SEric Sandeen 	} else
116817bd55d0SEric Sandeen 		return 0;
116917bd55d0SEric Sandeen }
117017bd55d0SEric Sandeen EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
117117bd55d0SEric Sandeen 
117217bd55d0SEric Sandeen /**
11733259f8beSChris Mason  * writeback_inodes_sb_nr_if_idle - start writeback if none underway
11743259f8beSChris Mason  * @sb: the superblock
11753259f8beSChris Mason  * @nr: the number of pages to write
11763259f8beSChris Mason  *
11773259f8beSChris Mason  * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
11783259f8beSChris Mason  * Returns 1 if writeback was started, 0 if not.
11793259f8beSChris Mason  */
11803259f8beSChris Mason int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
11813259f8beSChris Mason 				   unsigned long nr)
11823259f8beSChris Mason {
11833259f8beSChris Mason 	if (!writeback_in_progress(sb->s_bdi)) {
11843259f8beSChris Mason 		down_read(&sb->s_umount);
11853259f8beSChris Mason 		writeback_inodes_sb_nr(sb, nr);
11863259f8beSChris Mason 		up_read(&sb->s_umount);
11873259f8beSChris Mason 		return 1;
11883259f8beSChris Mason 	} else
11893259f8beSChris Mason 		return 0;
11903259f8beSChris Mason }
11913259f8beSChris Mason EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
11923259f8beSChris Mason 
11933259f8beSChris Mason /**
1194d8a8559cSJens Axboe  * sync_inodes_sb - sync sb inode pages
1195d8a8559cSJens Axboe  * @sb: the superblock
1196d8a8559cSJens Axboe  *
1197d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
1198d8a8559cSJens Axboe  * super_block. It does not return until all submitted IO is complete.
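 *
 * Illustrative usage (a sketch): this is the data-integrity
 * counterpart of writeback_inodes_sb() and is called the same way,
 * under s_umount:
 *
 *	down_read(&sb->s_umount);
 *	sync_inodes_sb(sb);
 *	up_read(&sb->s_umount);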
1199d8a8559cSJens Axboe  */
1200b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1201d8a8559cSJens Axboe {
120283ba7b07SChristoph Hellwig 	DECLARE_COMPLETION_ONSTACK(done);
120383ba7b07SChristoph Hellwig 	struct wb_writeback_work work = {
12043c4d7165SChristoph Hellwig 		.sb		= sb,
12053c4d7165SChristoph Hellwig 		.sync_mode	= WB_SYNC_ALL,
12063c4d7165SChristoph Hellwig 		.nr_pages	= LONG_MAX,
12073c4d7165SChristoph Hellwig 		.range_cyclic	= 0,
120883ba7b07SChristoph Hellwig 		.done		= &done,
12093c4d7165SChristoph Hellwig 	};
12103c4d7165SChristoph Hellwig 
1211cf37e972SChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1212cf37e972SChristoph Hellwig 
121383ba7b07SChristoph Hellwig 	bdi_queue_work(sb->s_bdi, &work);
121483ba7b07SChristoph Hellwig 	wait_for_completion(&done);
121583ba7b07SChristoph Hellwig 
1216b6e51316SJens Axboe 	wait_sb_inodes(sb);
1217d8a8559cSJens Axboe }
1218d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
12191da177e4SLinus Torvalds 
12201da177e4SLinus Torvalds /**
12211da177e4SLinus Torvalds  * write_inode_now - write an inode to disk
12221da177e4SLinus Torvalds  * @inode: inode to write to disk
12231da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
12241da177e4SLinus Torvalds  *
12257f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
12267f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
12277f04c26dSAndrea Arcangeli  *
12287f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
12291da177e4SLinus Torvalds  */
12301da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
12311da177e4SLinus Torvalds {
12321da177e4SLinus Torvalds 	int ret;
12331da177e4SLinus Torvalds 	struct writeback_control wbc = {
12341da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
123518914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1236111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1237111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
12381da177e4SLinus Torvalds 	};
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
124149364ce2SAndrew Morton 		wbc.nr_to_write = 0;
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds 	might_sleep();
12441da177e4SLinus Torvalds 	spin_lock(&inode_lock);
124501c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, &wbc);
12461da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12471da177e4SLinus Torvalds 	if (sync)
12481c0eeaf5SJoern Engel 		inode_sync_wait(inode);
12491da177e4SLinus Torvalds 	return ret;
12501da177e4SLinus Torvalds }
12511da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
12521da177e4SLinus Torvalds 
12531da177e4SLinus Torvalds /**
12541da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
12551da177e4SLinus Torvalds  * @inode: the inode to sync
12561da177e4SLinus Torvalds  * @wbc: controls the writeback mode
12571da177e4SLinus Torvalds  *
12581da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk. It will also
12591da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
12601da177e4SLinus Torvalds  * update inode->i_state.
12611da177e4SLinus Torvalds  *
12621da177e4SLinus Torvalds  * The caller must have a ref on the inode.
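 *
 * Illustrative usage (a sketch; the field values are examples only):
 * write the inode and up to sixteen of its dirty pages without waiting
 * for the IO to finish:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 16,
 *	};
 *
 *	err = sync_inode(inode, &wbc);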
12631da177e4SLinus Torvalds  */
12641da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
12651da177e4SLinus Torvalds {
12661da177e4SLinus Torvalds 	int ret;
12671da177e4SLinus Torvalds 
12681da177e4SLinus Torvalds 	spin_lock(&inode_lock);
126901c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, wbc);
12701da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12711da177e4SLinus Torvalds 	return ret;
12721da177e4SLinus Torvalds }
12731da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
1274c3765016SChristoph Hellwig 
1275c3765016SChristoph Hellwig /**
1276c3765016SChristoph Hellwig  * sync_inode_metadata - write an inode to disk
1277c3765016SChristoph Hellwig  * @inode: the inode to sync
1278c3765016SChristoph Hellwig  * @wait: wait for I/O to complete.
1279c3765016SChristoph Hellwig  *
1280c3765016SChristoph Hellwig  * Write an inode to disk and adjust its dirty state after completion.
1281c3765016SChristoph Hellwig  *
1282c3765016SChristoph Hellwig  * Note: only writes the actual inode, no associated data or other metadata.
1283c3765016SChristoph Hellwig  */
1284c3765016SChristoph Hellwig int sync_inode_metadata(struct inode *inode, int wait)
1285c3765016SChristoph Hellwig {
1286c3765016SChristoph Hellwig 	struct writeback_control wbc = {
1287c3765016SChristoph Hellwig 		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1288c3765016SChristoph Hellwig 		.nr_to_write = 0, /* metadata-only */
1289c3765016SChristoph Hellwig 	};
1290c3765016SChristoph Hellwig 
1291c3765016SChristoph Hellwig 	return sync_inode(inode, &wbc);
1292c3765016SChristoph Hellwig }
1293c3765016SChristoph Hellwig EXPORT_SYMBOL(sync_inode_metadata);
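/*
 * Illustrative usage (a sketch; example_fsync is hypothetical, not an
 * in-tree function): a simple ->fsync() implementation whose data pages
 * have already been written back can push out the inode itself with
 * sync_inode_metadata():
 *
 *	static int example_fsync(struct file *file, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return 0;
 *		return sync_inode_metadata(inode, 1);
 *	}
 *
 * Passing wait=1 selects WB_SYNC_ALL, so the call does not return until
 * the inode write has completed; nr_to_write stays 0, so no data pages
 * are written.
 */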