/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;
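
/*
 * A work item's typical lifecycle, summarized from writeback_inodes_sb_nr()
 * below (illustrative recap, not additional API): synchronous callers put
 * the work on the stack and wait on ->done:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.sb		= sb,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_pages	= nr,
 *		.done		= &done,
 *	};
 *	bdi_queue_work(sb->s_bdi, &work);
 *	wait_for_completion(&done);
 *
 * Asynchronous callers instead kzalloc() the work and leave ->done NULL,
 * in which case the flusher thread kfree()s it after execution (see
 * wb_do_writeback() below).
 */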

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}
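
/*
 * Illustrative call site (a sketch of the callers, kept as a comment):
 * the page-dirtying path wakes the flusher once the global background
 * threshold is exceeded, roughly
 *
 *	if (nr_reclaimable > background_thresh)
 *		bdi_start_background_writeback(bdi);
 *
 * cf. balance_dirty_pages() in mm/page-writeback.c.
 */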

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	spin_lock(&inode_wb_list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&inode_wb_list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	assert_spin_locked(&inode_wb_list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	assert_spin_locked(&inode_wb_list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&inode_wb_list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
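
/*
 * Worked example of the wraparound above (illustrative numbers): with
 * 32-bit jiffies, time_after() compares via signed overflow, so a
 * dirtied_when stamped about 2^31 ticks ago - roughly 25 days at HZ=1000 -
 * satisfies time_after(dirtied_when, t) again and appears "dirtied in the
 * future".  The extra time_before_eq(dirtied_when, jiffies) check rejects
 * exactly those stale stamps.
 */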

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	assert_spin_locked(&inode_wb_list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}
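
/*
 * How the expiry cutoff is produced (taken from wb_writeback() below,
 * shown here for context): kupdate-style callers pass
 *
 *	oldest_jif = jiffies -
 *			msecs_to_jiffies(dirty_expire_interval * 10);
 *
 * as *older_than_this, so only inodes dirtied before that point get moved
 * onto b_io.  A NULL cutoff expires everything.
 */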

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_wb_list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_wb_list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under inode_wb_list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If @wbc->sync_mode is WB_SYNC_ALL, we wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	assert_spin_locked(&inode_wb_list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_wb_list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_wb_list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
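
/*
 * Usage pattern (mirrors writeback_inodes_wb() below; shown here for
 * reference only):
 *
 *	if (!pin_sb_for_writeback(sb)) {
 *		requeue_io(inode);	// sb is going away, retry the inode later
 *		continue;
 *	}
 *	ret = writeback_sb_inodes(sb, wb, wbc, false);
 *	drop_super(sb);
 *
 * drop_super() releases both s_umount and the s_count reference taken here.
 */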

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1, if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			requeue_io(inode);
			continue;
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start)) {
			spin_unlock(&inode->i_lock);
			return 1;
		}

		__iget(inode);

		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_wb_list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_wb_list_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	int ret = 0;

	if (!wbc->wb_start)
		wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_wb_list_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_wb_list_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_wb_list_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_wb_list_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
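
/*
 * For scale: with the common 4KB page size this caps one chunk at
 * 1024 * 4KB = 4MB of data before the dirty state is reevaluated.
 */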

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	long write_chunk;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 * wb_writeback()
	 *     __writeback_inodes_sb()     <== called only once
	 *         write_cache_pages()     <== called once for each inode
	 *             (quickly) tag currently dirty pages
	 *             (maybe slowly) sync all tagged pages
	 */
	if (wbc.sync_mode == WB_SYNC_NONE)
		write_chunk = MAX_WRITEBACK_PAGES;
	else
		write_chunk = LONG_MAX;

	wbc.wb_start = jiffies; /* livelock avoidance */
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < write_chunk)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_wb_list_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = wb_inode(wb->b_more_io.prev);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode);
			spin_unlock(&inode->i_lock);
		}
		spin_unlock(&inode_wb_list_lock);
	}

	return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh()) {
		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}
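
/*
 * How this thread comes to life (a sketch of the bdi core, noted here for
 * orientation): the forker thread in mm/backing-dev.c creates it on demand,
 * roughly
 *
 *	task = kthread_run(bdi_writeback_thread, &bdi->wb,
 *			   "flush-%s", dev_name(bdi->dev));
 *
 * and may later kill idle instances, which is why bdi_wakeup_flusher()
 * above falls back to waking the default bdi's forker thread.
 */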

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
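
/*
 * Example of the block_dump output above (illustrative values only):
 *
 *	bash(1234): dirtied inode 524291 (notes.txt) on sda1
 *
 * Enabled via /proc/sys/vm/block_dump; handy for finding out what keeps
 * a disk from spinning down.
 */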

/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&inode_wb_list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&inode_wb_list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_NONE,
                .done           = &done,
                .nr_pages       = nr,
        };

        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
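/*
 * Hedged usage sketch: a caller that already holds s_umount can push out
 * a bounded number of dirty pages without waiting for the I/O itself.
 * Note that the wait_for_completion() above means the call blocks until
 * the flusher thread has *processed* the request, not until the writes
 * hit disk.  The helper name and the page count are illustrative.
 */
static void example_kick_bounded_writeback(struct super_block *sb)
{
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        writeback_inodes_sb_nr(sb, 1024);
}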
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
        writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
        if (!writeback_in_progress(sb->s_bdi)) {
                down_read(&sb->s_umount);
                writeback_inodes_sb(sb);
                up_read(&sb->s_umount);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle - start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
                                   unsigned long nr)
{
        if (!writeback_in_progress(sb->s_bdi)) {
                down_read(&sb->s_umount);
                writeback_inodes_sb_nr(sb, nr);
                up_read(&sb->s_umount);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
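/*
 * Hedged sketch of how the *_if_idle variants are meant to be used: a
 * delayed-allocation filesystem that runs short of space can nudge the
 * flusher to clean pages, but only when no writeback is already in
 * flight, so it never queues redundant work.  The helper below is
 * illustrative and not taken from any particular filesystem.
 */
static int example_nudge_flusher(struct super_block *sb, unsigned long nr)
{
        /*
         * These helpers take s_umount for reading themselves, so the
         * caller must not already hold it.
         */
        if (nr)
                return writeback_inodes_sb_nr_if_idle(sb, nr);
        return writeback_inodes_sb_if_idle(sb);
}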
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_ALL,
                .nr_pages       = LONG_MAX,
                .range_cyclic   = 0,
                .done           = &done,
        };

        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);

        wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        spin_lock(&inode_wb_list_lock);
        spin_lock(&inode->i_lock);
        ret = writeback_single_inode(inode, &wbc);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_wb_list_lock);
        if (sync)
                inode_sync_wait(inode);
        return ret;
}
EXPORT_SYMBOL(write_inode_now);
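/*
 * Hedged usage sketch: write_inode_now() is the call of choice when one
 * specific inode must be durable right away, e.g. a file server
 * acknowledging a commit.  The wrapper name is made up; only the
 * write_inode_now() call itself is real API.
 */
static int example_commit_single_inode(struct inode *inode)
{
        /* sync == 1: write synchronously and wait via inode_sync_wait(). */
        return write_inode_now(inode, 1);
}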
/**
 * sync_inode - write an inode and its pages to disk
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        spin_lock(&inode_wb_list_lock);
        spin_lock(&inode->i_lock);
        ret = writeback_single_inode(inode, wbc);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_wb_list_lock);
        return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
        struct writeback_control wbc = {
                .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
                .nr_to_write = 0, /* metadata-only */
        };

        return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
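/*
 * Hedged sketch of a simple ->fsync() built on the helpers above: flush
 * the data pages, then write the inode itself.  Real filesystems (compare
 * generic_file_fsync() in fs/libfs.c) add buffer-head and journal
 * handling; this illustrative version skips all of that.
 */
static int example_simple_fsync(struct inode *inode, int datasync)
{
        int err;

        /* Push out and wait on dirty pagecache pages for data integrity. */
        err = filemap_write_and_wait(inode->i_mapping);
        if (err)
                return err;

        /* For fdatasync() the inode itself may not need writing at all. */
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return 0;

        /* Write just the inode, synchronously. */
        return sync_inode_metadata(inode, 1);
}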