/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  i.e.: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;
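/*
 * Overview (a summary of the code below, not authoritative documentation):
 * work items are allocated (or placed on the stack) by callers, queued on
 * bdi->work_list under bdi->wb_lock, and consumed by the per-bdi flusher
 * thread in wb_do_writeback(), which completes work->done for synchronous
 * callers and kfree()s the item otherwise.
 */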
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Wake up the flusher thread, or the forker thread that will fork it.
 * Requires bdi->wb_lock.
 */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;

	bdi_queue_work(bdi, work);
}
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	spin_lock(&inode_wb_list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&inode_wb_list_lock);
}
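/*
 * Illustrative call flow (derived from the functions above and their callers,
 * shown here purely as a reading aid):
 *
 *	wakeup_flusher_threads()	balance_dirty_pages() et al.
 *	  __bdi_start_writeback()	  bdi_start_background_writeback()
 *	    bdi_queue_work()		    bdi_wakeup_flusher()
 *	      bdi_wakeup_flusher()
 *
 * Either path ends with the per-bdi flusher thread running
 * wb_do_writeback() below.
 */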
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	assert_spin_locked(&inode_wb_list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	assert_spin_locked(&inode_wb_list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&inode_wb_list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
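/*
 * Worked example for the 32-bit case above (illustrative numbers): with
 * HZ=100, a 32-bit jiffies counter wraps roughly every 497 days.  If an
 * inode was dirtied just before a wrap and t == jiffies now, dirtied_when
 * can compare as "after" t under time_after()'s modular arithmetic even
 * though it is ancient.  The extra time_before_eq(dirtied_when, jiffies)
 * check rejects exactly those wrapped values.
 */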
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	assert_spin_locked(&inode_wb_list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}
/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_wb_list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_wb_list_lock);
		spin_lock(&inode->i_lock);
	}
}
/*
 * Write out an inode's dirty pages.  Called under inode_wb_list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	assert_spin_locked(&inode_wb_list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_wb_list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_wb_list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
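/*
 * Summary of the requeueing logic above (a reading aid, not new policy):
 * on return the inode sits on exactly one of
 *
 *	b_more_io - locked for non-integrity writeback, or write slice
 *	            exhausted with pages still dirty;
 *	b_dirty   - redirtied, blocked on something other than congestion,
 *	            or metadata still dirty (via redirty_tail());
 *	no list   - clean, or I_FREEING with the lists left untouched.
 */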
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1, if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			requeue_io(inode);
			continue;
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start)) {
			spin_unlock(&inode->i_lock);
			return 1;
		}

		__iget(inode);

		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_wb_list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_wb_list_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}
void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	int ret = 0;

	if (!wbc->wb_start)
		wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_wb_list_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_wb_list_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_wb_list_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_wb_list_lock);
}
/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}
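/*
 * Illustrative numbers (assuming the usual defaults, which are tunable):
 * with vm.dirty_background_ratio = 10 and roughly 1 GiB of dirtyable
 * memory, global_dirty_limits() yields a background_thresh of about
 * 26000 4 KiB pages (~100 MiB), so background writeback keeps running
 * while dirty plus unstable-NFS pages exceed about that much.
 */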
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	long write_chunk;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          __writeback_inodes_sb()     <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (wbc.sync_mode == WB_SYNC_NONE)
		write_chunk = MAX_WRITEBACK_PAGES;
	else
		write_chunk = LONG_MAX;

	wbc.wb_start = jiffies; /* livelock avoidance */
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other work items are done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < write_chunk)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_wb_list_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = wb_inode(wb->b_more_io.prev);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode);
			spin_unlock(&inode->i_lock);
		}
		spin_unlock(&inode_wb_list_lock);
	}

	return wrote;
}
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh()) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
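/*
 * Timing note (defaults, tunable via /proc/sys/vm): dirty_writeback_interval
 * and dirty_expire_interval are in centiseconds, hence the "* 10" before
 * msecs_to_jiffies().  With the usual defaults of 500 and 3000, the periodic
 * flush runs every 5 seconds and writes back inodes dirtied more than 30
 * seconds ago.
 */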
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}


/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&inode_wb_list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&inode_wb_list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);
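/*
 * For reference (the wrappers live in include/linux/fs.h; shown here only
 * as a reading aid): the common entry points boil down to
 *
 *	mark_inode_dirty(inode)      => __mark_inode_dirty(inode, I_DIRTY);
 *	mark_inode_dirty_sync(inode) => __mark_inode_dirty(inode, I_DIRTY_SYNC);
 *
 * where I_DIRTY covers I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES.
 */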
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}
/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
		.nr_pages	= nr,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);
/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
		.nr_pages	= nr,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
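/*
 * Typical use of writeback_inodes_sb_if_idle(): a filesystem that is
 * running short on some resource can opportunistically kick writeback
 * without blocking and without piling up redundant work. A sketch,
 * loosely modeled on how ext4 nudges writeback when free blocks run low
 * (the condition and variable names here are illustrative, not ext4's
 * actual code):
 *
 *	if (free_blocks < 2 * dirty_blocks)
 *		writeback_inodes_sb_if_idle(sb);
 *
 * Because the helper only acts when no writeback is already in progress,
 * repeated calls from a hot path stay cheap.
 */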
/**
 * writeback_inodes_sb_nr_if_idle - start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr);
		up_read(&sb->s_umount);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
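/*
 * For context: sync(2) in fs/sync.c uses the two flavours above roughly
 * like this per superblock (simplified sketch, not the verbatim code):
 *
 *	if (wait)
 *		sync_inodes_sb(sb);		(WB_SYNC_ALL, then waits in
 *						 wait_sb_inodes())
 *	else
 *		writeback_inodes_sb(sb);	(WB_SYNC_NONE, no waiting)
 *	if (sb->s_op->sync_fs)
 *		sb->s_op->sync_fs(sb, wait);
 *
 * so a full sync is a non-waiting pass to get I/O moving, followed by a
 * waiting pass that guarantees completion.
 */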
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_wb_list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_wb_list_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_wb_list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_wb_list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
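/*
 * Example caller of sync_inode_metadata(): the generic fsync helper in
 * fs/libfs.c flushes associated buffers first and then writes the inode
 * itself. Roughly (simplified sketch of generic_file_fsync()):
 *
 *	int generic_file_fsync(struct file *file, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *		int err, ret;
 *
 *		ret = sync_mapping_buffers(inode->i_mapping);
 *		if (!(inode->i_state & I_DIRTY))
 *			return ret;
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return ret;
 *
 *		err = sync_inode_metadata(inode, 1);
 *		if (ret == 0)
 *			ret = err;
 *		return ret;
 *	}
 *
 * The datasync test shows why I_DIRTY_SYNC and I_DIRTY_DATASYNC are kept
 * separate: fdatasync() may safely skip a pure timestamp update.
 */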